1// SPDX-License-Identifier: GPL-2.0
2/*
3 * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
4 * Author: James.Qian.Wang <james.qian.wang@arm.com>
5 *
6 */
7
8#include <drm/drm_blend.h>
9#include <drm/drm_print.h>
10#include "d71_dev.h"
11#include "malidp_io.h"
12
13static u64 get_lpu_event(struct d71_pipeline *d71_pipeline)
14{
15	u32 __iomem *reg = d71_pipeline->lpu_addr;
16	u32 status, raw_status;
17	u64 evts = 0ULL;
18
19	raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
20	if (raw_status & LPU_IRQ_IBSY)
21		evts |= KOMEDA_EVENT_IBSY;
22	if (raw_status & LPU_IRQ_EOW)
23		evts |= KOMEDA_EVENT_EOW;
24	if (raw_status & LPU_IRQ_OVR)
25		evts |= KOMEDA_EVENT_OVR;
26
27	if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY | LPU_IRQ_OVR)) {
28		u32 restore = 0, tbu_status;
29		/* Check error of LPU status */
30		status = malidp_read32(reg, BLK_STATUS);
31		if (status & LPU_STATUS_AXIE) {
32			restore |= LPU_STATUS_AXIE;
33			evts |= KOMEDA_ERR_AXIE;
34		}
35		if (status & LPU_STATUS_ACE0) {
36			restore |= LPU_STATUS_ACE0;
37			evts |= KOMEDA_ERR_ACE0;
38		}
39		if (status & LPU_STATUS_ACE1) {
40			restore |= LPU_STATUS_ACE1;
41			evts |= KOMEDA_ERR_ACE1;
42		}
43		if (status & LPU_STATUS_ACE2) {
44			restore |= LPU_STATUS_ACE2;
45			evts |= KOMEDA_ERR_ACE2;
46		}
47		if (status & LPU_STATUS_ACE3) {
48			restore |= LPU_STATUS_ACE3;
49			evts |= KOMEDA_ERR_ACE3;
50		}
51		if (status & LPU_STATUS_FEMPTY) {
52			restore |= LPU_STATUS_FEMPTY;
53			evts |= KOMEDA_EVENT_EMPTY;
54		}
55		if (status & LPU_STATUS_FFULL) {
56			restore |= LPU_STATUS_FFULL;
57			evts |= KOMEDA_EVENT_FULL;
58		}
59
60		if (restore != 0)
61			malidp_write32_mask(reg, BLK_STATUS, restore, 0);
62
63		restore = 0;
64		/* Check errors of TBU status */
65		tbu_status = malidp_read32(reg, LPU_TBU_STATUS);
66		if (tbu_status & LPU_TBU_STATUS_TCF) {
67			restore |= LPU_TBU_STATUS_TCF;
68			evts |= KOMEDA_ERR_TCF;
69		}
70		if (tbu_status & LPU_TBU_STATUS_TTNG) {
71			restore |= LPU_TBU_STATUS_TTNG;
72			evts |= KOMEDA_ERR_TTNG;
73		}
74		if (tbu_status & LPU_TBU_STATUS_TITR) {
75			restore |= LPU_TBU_STATUS_TITR;
76			evts |= KOMEDA_ERR_TITR;
77		}
78		if (tbu_status & LPU_TBU_STATUS_TEMR) {
79			restore |= LPU_TBU_STATUS_TEMR;
80			evts |= KOMEDA_ERR_TEMR;
81		}
82		if (tbu_status & LPU_TBU_STATUS_TTF) {
83			restore |= LPU_TBU_STATUS_TTF;
84			evts |= KOMEDA_ERR_TTF;
85		}
86		if (restore != 0)
87			malidp_write32_mask(reg, LPU_TBU_STATUS, restore, 0);
88	}
89
90	malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
91	return evts;
92}
93
94static u64 get_cu_event(struct d71_pipeline *d71_pipeline)
95{
96	u32 __iomem *reg = d71_pipeline->cu_addr;
97	u32 status, raw_status;
98	u64 evts = 0ULL;
99
100	raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
101	if (raw_status & CU_IRQ_OVR)
102		evts |= KOMEDA_EVENT_OVR;
103
104	if (raw_status & (CU_IRQ_ERR | CU_IRQ_OVR)) {
105		status = malidp_read32(reg, BLK_STATUS) & 0x7FFFFFFF;
106		if (status & CU_STATUS_CPE)
107			evts |= KOMEDA_ERR_CPE;
108		if (status & CU_STATUS_ZME)
109			evts |= KOMEDA_ERR_ZME;
110		if (status & CU_STATUS_CFGE)
111			evts |= KOMEDA_ERR_CFGE;
112		if (status)
113			malidp_write32_mask(reg, BLK_STATUS, status, 0);
114	}
115
116	malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
117
118	return evts;
119}
120
121static u64 get_dou_event(struct d71_pipeline *d71_pipeline)
122{
123	u32 __iomem *reg = d71_pipeline->dou_addr;
124	u32 status, raw_status;
125	u64 evts = 0ULL;
126
127	raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
128	if (raw_status & DOU_IRQ_PL0)
129		evts |= KOMEDA_EVENT_VSYNC;
130	if (raw_status & DOU_IRQ_UND)
131		evts |= KOMEDA_EVENT_URUN;
132
133	if (raw_status & (DOU_IRQ_ERR | DOU_IRQ_UND)) {
134		u32 restore  = 0;
135
136		status = malidp_read32(reg, BLK_STATUS);
137		if (status & DOU_STATUS_DRIFTTO) {
138			restore |= DOU_STATUS_DRIFTTO;
139			evts |= KOMEDA_ERR_DRIFTTO;
140		}
141		if (status & DOU_STATUS_FRAMETO) {
142			restore |= DOU_STATUS_FRAMETO;
143			evts |= KOMEDA_ERR_FRAMETO;
144		}
145		if (status & DOU_STATUS_TETO) {
146			restore |= DOU_STATUS_TETO;
147			evts |= KOMEDA_ERR_TETO;
148		}
149		if (status & DOU_STATUS_CSCE) {
150			restore |= DOU_STATUS_CSCE;
151			evts |= KOMEDA_ERR_CSCE;
152		}
153
154		if (restore != 0)
155			malidp_write32_mask(reg, BLK_STATUS, restore, 0);
156	}
157
158	malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
159	return evts;
160}
161
162static u64 get_pipeline_event(struct d71_pipeline *d71_pipeline, u32 gcu_status)
163{
164	u32 evts = 0ULL;
165
166	if (gcu_status & (GLB_IRQ_STATUS_LPU0 | GLB_IRQ_STATUS_LPU1))
167		evts |= get_lpu_event(d71_pipeline);
168
169	if (gcu_status & (GLB_IRQ_STATUS_CU0 | GLB_IRQ_STATUS_CU1))
170		evts |= get_cu_event(d71_pipeline);
171
172	if (gcu_status & (GLB_IRQ_STATUS_DOU0 | GLB_IRQ_STATUS_DOU1))
173		evts |= get_dou_event(d71_pipeline);
174
175	return evts;
176}
177
178static irqreturn_t
179d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts)
180{
181	struct d71_dev *d71 = mdev->chip_data;
182	u32 status, gcu_status, raw_status;
183
184	gcu_status = malidp_read32(d71->gcu_addr, GLB_IRQ_STATUS);
185
186	if (gcu_status & GLB_IRQ_STATUS_GCU) {
187		raw_status = malidp_read32(d71->gcu_addr, BLK_IRQ_RAW_STATUS);
188		if (raw_status & GCU_IRQ_CVAL0)
189			evts->pipes[0] |= KOMEDA_EVENT_FLIP;
190		if (raw_status & GCU_IRQ_CVAL1)
191			evts->pipes[1] |= KOMEDA_EVENT_FLIP;
192		if (raw_status & GCU_IRQ_ERR) {
193			status = malidp_read32(d71->gcu_addr, BLK_STATUS);
194			if (status & GCU_STATUS_MERR) {
195				evts->global |= KOMEDA_ERR_MERR;
196				malidp_write32_mask(d71->gcu_addr, BLK_STATUS,
197						    GCU_STATUS_MERR, 0);
198			}
199		}
200
201		malidp_write32(d71->gcu_addr, BLK_IRQ_CLEAR, raw_status);
202	}
203
204	if (gcu_status & GLB_IRQ_STATUS_PIPE0)
205		evts->pipes[0] |= get_pipeline_event(d71->pipes[0], gcu_status);
206
207	if (gcu_status & GLB_IRQ_STATUS_PIPE1)
208		evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status);
209
210	return IRQ_RETVAL(gcu_status);
211}
212
213#define ENABLED_GCU_IRQS	(GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \
214				 GCU_IRQ_MODE | GCU_IRQ_ERR)
215#define ENABLED_LPU_IRQS	(LPU_IRQ_IBSY | LPU_IRQ_ERR | LPU_IRQ_EOW)
216#define ENABLED_CU_IRQS		(CU_IRQ_OVR | CU_IRQ_ERR)
217#define ENABLED_DOU_IRQS	(DOU_IRQ_UND | DOU_IRQ_ERR)
218
219static int d71_enable_irq(struct komeda_dev *mdev)
220{
221	struct d71_dev *d71 = mdev->chip_data;
222	struct d71_pipeline *pipe;
223	u32 i;
224
225	malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK,
226			    ENABLED_GCU_IRQS, ENABLED_GCU_IRQS);
227	for (i = 0; i < d71->num_pipelines; i++) {
228		pipe = d71->pipes[i];
229		malidp_write32_mask(pipe->cu_addr,  BLK_IRQ_MASK,
230				    ENABLED_CU_IRQS, ENABLED_CU_IRQS);
231		malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
232				    ENABLED_LPU_IRQS, ENABLED_LPU_IRQS);
233		malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
234				    ENABLED_DOU_IRQS, ENABLED_DOU_IRQS);
235	}
236	return 0;
237}
238
239static int d71_disable_irq(struct komeda_dev *mdev)
240{
241	struct d71_dev *d71 = mdev->chip_data;
242	struct d71_pipeline *pipe;
243	u32 i;
244
245	malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK, ENABLED_GCU_IRQS, 0);
246	for (i = 0; i < d71->num_pipelines; i++) {
247		pipe = d71->pipes[i];
248		malidp_write32_mask(pipe->cu_addr,  BLK_IRQ_MASK,
249				    ENABLED_CU_IRQS, 0);
250		malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
251				    ENABLED_LPU_IRQS, 0);
252		malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
253				    ENABLED_DOU_IRQS, 0);
254	}
255	return 0;
256}
257
258static void d71_on_off_vblank(struct komeda_dev *mdev, int master_pipe, bool on)
259{
260	struct d71_dev *d71 = mdev->chip_data;
261	struct d71_pipeline *pipe = d71->pipes[master_pipe];
262
263	malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
264			    DOU_IRQ_PL0, on ? DOU_IRQ_PL0 : 0);
265}
266
267static int to_d71_opmode(int core_mode)
268{
269	switch (core_mode) {
270	case KOMEDA_MODE_DISP0:
271		return DO0_ACTIVE_MODE;
272	case KOMEDA_MODE_DISP1:
273		return DO1_ACTIVE_MODE;
274	case KOMEDA_MODE_DUAL_DISP:
275		return DO01_ACTIVE_MODE;
276	case KOMEDA_MODE_INACTIVE:
277		return INACTIVE_MODE;
278	default:
279		WARN(1, "Unknown operation mode");
280		return INACTIVE_MODE;
281	}
282}
283
284static int d71_change_opmode(struct komeda_dev *mdev, int new_mode)
285{
286	struct d71_dev *d71 = mdev->chip_data;
287	u32 opmode = to_d71_opmode(new_mode);
288	int ret;
289
290	malidp_write32_mask(d71->gcu_addr, BLK_CONTROL, 0x7, opmode);
291
292	ret = dp_wait_cond(((malidp_read32(d71->gcu_addr, BLK_CONTROL) & 0x7) == opmode),
293			   100, 1000, 10000);
294
295	return ret;
296}
297
298static void d71_flush(struct komeda_dev *mdev,
299		      int master_pipe, u32 active_pipes)
300{
301	struct d71_dev *d71 = mdev->chip_data;
302	u32 reg_offset = (master_pipe == 0) ?
303			 GCU_CONFIG_VALID0 : GCU_CONFIG_VALID1;
304
305	malidp_write32(d71->gcu_addr, reg_offset, GCU_CONFIG_CVAL);
306}
307
308static int d71_reset(struct d71_dev *d71)
309{
310	u32 __iomem *gcu = d71->gcu_addr;
311	int ret;
312
313	malidp_write32(gcu, BLK_CONTROL, GCU_CONTROL_SRST);
314
315	ret = dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST),
316			   100, 1000, 10000);
317
318	return ret;
319}
320
321void d71_read_block_header(u32 __iomem *reg, struct block_header *blk)
322{
323	int i;
324
325	blk->block_info = malidp_read32(reg, BLK_BLOCK_INFO);
326	if (BLOCK_INFO_BLK_TYPE(blk->block_info) == D71_BLK_TYPE_RESERVED)
327		return;
328
329	blk->pipeline_info = malidp_read32(reg, BLK_PIPELINE_INFO);
330
331	/* get valid input and output ids */
332	for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++)
333		blk->input_ids[i] = malidp_read32(reg + i, BLK_VALID_INPUT_ID0);
334	for (i = 0; i < PIPELINE_INFO_N_OUTPUTS(blk->pipeline_info); i++)
335		blk->output_ids[i] = malidp_read32(reg + i, BLK_OUTPUT_ID0);
336}
337
338static void d71_cleanup(struct komeda_dev *mdev)
339{
340	struct d71_dev *d71 = mdev->chip_data;
341
342	if (!d71)
343		return;
344
345	devm_kfree(mdev->dev, d71);
346	mdev->chip_data = NULL;
347}
348
/* Probe the D71 hardware: reset the core, read the global configuration,
 * create the komeda pipelines and enumerate/probe every register block.
 *
 * Returns 0 on success or a negative errno; on failure the chip data is
 * torn down via d71_cleanup().
 */
static int d71_enum_resources(struct komeda_dev *mdev)
{
	struct d71_dev *d71;
	struct komeda_pipeline *pipe;
	struct block_header blk;
	u32 __iomem *blk_base;
	u32 i, value, offset;
	int err;

	d71 = devm_kzalloc(mdev->dev, sizeof(*d71), GFP_KERNEL);
	if (!d71)
		return -ENOMEM;

	mdev->chip_data = d71;
	d71->mdev = mdev;
	d71->gcu_addr = mdev->reg_base;
	/* Register offsets are byte offsets; reg_base is a u32 pointer,
	 * hence the >> 2 scaling.
	 */
	d71->periph_addr = mdev->reg_base + (D71_BLOCK_OFFSET_PERIPH >> 2);

	err = d71_reset(d71);
	if (err) {
		DRM_ERROR("Fail to reset d71 device.\n");
		goto err_cleanup;
	}

	/* probe GCU */
	value = malidp_read32(d71->gcu_addr, GLB_CORE_INFO);
	d71->num_blocks = value & 0xFF;
	d71->num_pipelines = (value >> 8) & 0x7;

	if (d71->num_pipelines > D71_MAX_PIPELINE) {
		DRM_ERROR("d71 supports %d pipelines, but got: %d.\n",
			  D71_MAX_PIPELINE, d71->num_pipelines);
		err = -EINVAL;
		goto err_cleanup;
	}

	/* Only the legacy HW has the periph block, the newer merges the periph
	 * into GCU
	 */
	value = malidp_read32(d71->periph_addr, BLK_BLOCK_INFO);
	if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH)
		d71->periph_addr = NULL;

	if (d71->periph_addr) {
		/* probe PERIPHERAL in legacy HW */
		value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID);

		d71->max_line_size	= value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048;
		d71->max_vsize		= 4096;
		d71->num_rich_layers	= value & PERIPH_NUM_RICH_LAYERS ? 2 : 1;
		d71->supports_dual_link	= !!(value & PERIPH_SPLIT_EN);
		d71->integrates_tbu	= !!(value & PERIPH_TBU_EN);
	} else {
		/* Newer HW: the same capabilities come from the GCU
		 * configuration ID registers.
		 */
		value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID0);
		d71->max_line_size	= GCU_MAX_LINE_SIZE(value);
		d71->max_vsize		= GCU_MAX_NUM_LINES(value);

		value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID1);
		d71->num_rich_layers	= GCU_NUM_RICH_LAYERS(value);
		d71->supports_dual_link	= GCU_DISPLAY_SPLIT_EN(value);
		d71->integrates_tbu	= GCU_DISPLAY_TBU_EN(value);
	}

	for (i = 0; i < d71->num_pipelines; i++) {
		pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline),
					   &d71_pipeline_funcs);
		if (IS_ERR(pipe)) {
			err = PTR_ERR(pipe);
			goto err_cleanup;
		}

		/* D71 HW doesn't update shadow registers when display output
		 * is turning off, so when we disable all pipeline components
		 * together with display output disable by one flush or one
		 * operation, the disable operation updated registers will not
		 * be flush to or valid in HW, which may leads problem.
		 * To workaround this problem, introduce a two phase disable.
		 * Phase1: Disabling components with display is on to make sure
		 *	   the disable can be flushed to HW.
		 * Phase2: Only turn-off display output.
		 */
		value = KOMEDA_PIPELINE_IMPROCS |
			BIT(KOMEDA_COMPONENT_TIMING_CTRLR);

		pipe->standalone_disabled_comps = value;

		d71->pipes[i] = to_d71_pipeline(pipe);
	}

	/* loop the register blks and probe.
	 * NOTE: d71->num_blocks includes reserved blocks.
	 * d71->num_blocks = GCU + valid blocks + reserved blocks
	 */
	i = 1; /* exclude GCU */
	offset = D71_BLOCK_SIZE; /* skip GCU */
	while (i < d71->num_blocks) {
		blk_base = mdev->reg_base + (offset >> 2);

		d71_read_block_header(blk_base, &blk);
		if (BLOCK_INFO_BLK_TYPE(blk.block_info) != D71_BLK_TYPE_RESERVED) {
			err = d71_probe_block(d71, &blk, blk_base);
			if (err)
				goto err_cleanup;
		}

		i++;
		offset += D71_BLOCK_SIZE;
	}

	DRM_DEBUG("total %d (out of %d) blocks are found.\n",
		  i, d71->num_blocks);

	return 0;

err_cleanup:
	d71_cleanup(mdev);
	return err;
}
467
468#define __HW_ID(__group, __format) \
469	((((__group) & 0x7) << 3) | ((__format) & 0x7))
470
471#define RICH		KOMEDA_FMT_RICH_LAYER
472#define SIMPLE		KOMEDA_FMT_SIMPLE_LAYER
473#define RICH_SIMPLE	(KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_SIMPLE_LAYER)
474#define RICH_WB		(KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_WB_LAYER)
475#define RICH_SIMPLE_WB	(RICH_SIMPLE | KOMEDA_FMT_WB_LAYER)
476
477#define Rot_0		DRM_MODE_ROTATE_0
478#define Flip_H_V	(DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y | Rot_0)
479#define Rot_ALL_H_V	(DRM_MODE_ROTATE_MASK | Flip_H_V)
480
481#define LYT_NM		BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16)
482#define LYT_WB		BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
483#define LYT_NM_WB	(LYT_NM | LYT_WB)
484
485#define AFB_TH		AFBC(_TILED | _SPARSE)
486#define AFB_TH_SC_YTR	AFBC(_TILED | _SC | _SPARSE | _YTR)
487#define AFB_TH_SC_YTR_BS AFBC(_TILED | _SC | _SPARSE | _YTR | _SPLIT)
488
/* D71 format capability table: each entry maps a DRM fourcc to the HW
 * format ID it is programmed with, together with the layer types,
 * supported rotations and AFBC layouts/features for that format. A
 * fourcc may appear twice: once for the linear variant and once (with
 * afbc_layouts/afbc_features set) for the AFBC-compressed variant.
 */
static struct komeda_format_caps d71_format_caps_table[] = {
	/*   HW_ID    |        fourcc         |   layer_types |   rots    | afbc_layouts | afbc_features */
	/* ABGR_2101010*/
	{__HW_ID(0, 0),	DRM_FORMAT_ARGB2101010,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(0, 1),	DRM_FORMAT_ABGR2101010,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(0, 1),	DRM_FORMAT_ABGR2101010,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
	{__HW_ID(0, 2),	DRM_FORMAT_RGBA1010102,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(0, 3),	DRM_FORMAT_BGRA1010102,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	/* ABGR_8888*/
	{__HW_ID(1, 0),	DRM_FORMAT_ARGB8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(1, 1),	DRM_FORMAT_ABGR8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(1, 1),	DRM_FORMAT_ABGR8888,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
	{__HW_ID(1, 2),	DRM_FORMAT_RGBA8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(1, 3),	DRM_FORMAT_BGRA8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	/* XBGB_8888 */
	{__HW_ID(2, 0),	DRM_FORMAT_XRGB8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(2, 1),	DRM_FORMAT_XBGR8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(2, 2),	DRM_FORMAT_RGBX8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(2, 3),	DRM_FORMAT_BGRX8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	/* BGR_888 */ /* none-afbc RGB888 doesn't support rotation and flip */
	{__HW_ID(3, 0),	DRM_FORMAT_RGB888,	RICH_SIMPLE_WB,	Rot_0,			0, 0},
	{__HW_ID(3, 1),	DRM_FORMAT_BGR888,	RICH_SIMPLE_WB,	Rot_0,			0, 0},
	{__HW_ID(3, 1),	DRM_FORMAT_BGR888,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
	/* BGR 16bpp */
	{__HW_ID(4, 0),	DRM_FORMAT_RGBA5551,	RICH_SIMPLE,	Flip_H_V,		0, 0},
	{__HW_ID(4, 1),	DRM_FORMAT_ABGR1555,	RICH_SIMPLE,	Flip_H_V,		0, 0},
	{__HW_ID(4, 1),	DRM_FORMAT_ABGR1555,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */
	{__HW_ID(4, 2),	DRM_FORMAT_RGB565,	RICH_SIMPLE,	Flip_H_V,		0, 0},
	{__HW_ID(4, 3),	DRM_FORMAT_BGR565,	RICH_SIMPLE,	Flip_H_V,		0, 0},
	{__HW_ID(4, 3),	DRM_FORMAT_BGR565,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */
	{__HW_ID(4, 4), DRM_FORMAT_R8,		SIMPLE,		Rot_0,			0, 0},
	/* YUV 444/422/420 8bit  */
	{__HW_ID(5, 1),	DRM_FORMAT_YUYV,	RICH,		Rot_ALL_H_V,	LYT_NM, AFB_TH}, /* afbc */
	{__HW_ID(5, 2),	DRM_FORMAT_YUYV,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(5, 3),	DRM_FORMAT_UYVY,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(5, 6),	DRM_FORMAT_NV12,	RICH_WB,	Flip_H_V,		0, 0},
	{__HW_ID(5, 6),	DRM_FORMAT_YUV420_8BIT,	RICH,		Rot_ALL_H_V,	LYT_NM, AFB_TH}, /* afbc */
	{__HW_ID(5, 7),	DRM_FORMAT_YUV420,	RICH,		Flip_H_V,		0, 0},
	/* YUV 10bit*/
	{__HW_ID(6, 6),	DRM_FORMAT_X0L2,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(6, 7),	DRM_FORMAT_P010,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(6, 7),	DRM_FORMAT_YUV420_10BIT, RICH,		Rot_ALL_H_V,	LYT_NM, AFB_TH},
};
532
533static bool d71_format_mod_supported(const struct komeda_format_caps *caps,
534				     u32 layer_type, u64 modifier, u32 rot)
535{
536	uint64_t layout = modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK;
537
538	if ((layout == AFBC_FORMAT_MOD_BLOCK_SIZE_32x8) &&
539	    drm_rotation_90_or_270(rot)) {
540		DRM_DEBUG_ATOMIC("D71 doesn't support ROT90 for WB-AFBC.\n");
541		return false;
542	}
543
544	return true;
545}
546
547static void d71_init_fmt_tbl(struct komeda_dev *mdev)
548{
549	struct komeda_format_caps_table *table = &mdev->fmt_tbl;
550
551	table->format_caps = d71_format_caps_table;
552	table->format_mod_supported = d71_format_mod_supported;
553	table->n_formats = ARRAY_SIZE(d71_format_caps_table);
554}
555
556static int d71_connect_iommu(struct komeda_dev *mdev)
557{
558	struct d71_dev *d71 = mdev->chip_data;
559	u32 __iomem *reg = d71->gcu_addr;
560	u32 check_bits = (d71->num_pipelines == 2) ?
561			 GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0;
562	int i, ret;
563
564	if (!d71->integrates_tbu)
565		return -1;
566
567	malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_CONNECT_MODE);
568
569	ret = dp_wait_cond(has_bits(check_bits, malidp_read32(reg, BLK_STATUS)),
570			100, 1000, 1000);
571	if (ret < 0) {
572		DRM_ERROR("timed out connecting to TCU!\n");
573		malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE);
574		return ret;
575	}
576
577	for (i = 0; i < d71->num_pipelines; i++)
578		malidp_write32_mask(d71->pipes[i]->lpu_addr, LPU_TBU_CONTROL,
579				    LPU_TBU_CTRL_TLBPEN, LPU_TBU_CTRL_TLBPEN);
580	return 0;
581}
582
583static int d71_disconnect_iommu(struct komeda_dev *mdev)
584{
585	struct d71_dev *d71 = mdev->chip_data;
586	u32 __iomem *reg = d71->gcu_addr;
587	u32 check_bits = (d71->num_pipelines == 2) ?
588			 GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0;
589	int ret;
590
591	malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_DISCONNECT_MODE);
592
593	ret = dp_wait_cond(((malidp_read32(reg, BLK_STATUS) & check_bits) == 0),
594			100, 1000, 1000);
595	if (ret < 0) {
596		DRM_ERROR("timed out disconnecting from TCU!\n");
597		malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE);
598	}
599
600	return ret;
601}
602
/* D71 implementation of the komeda core device callbacks */
static const struct komeda_dev_funcs d71_chip_funcs = {
	.init_format_table	= d71_init_fmt_tbl,
	.enum_resources		= d71_enum_resources,
	.cleanup		= d71_cleanup,
	.irq_handler		= d71_irq_handler,
	.enable_irq		= d71_enable_irq,
	.disable_irq		= d71_disable_irq,
	.on_off_vblank		= d71_on_off_vblank,
	.change_opmode		= d71_change_opmode,
	.flush			= d71_flush,
	.connect_iommu		= d71_connect_iommu,
	.disconnect_iommu	= d71_disconnect_iommu,
	.dump_register		= d71_dump,
};
617
618const struct komeda_dev_funcs *
619d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
620{
621	const struct komeda_dev_funcs *funcs;
622	u32 product_id;
623
624	chip->core_id = malidp_read32(reg_base, GLB_CORE_ID);
625
626	product_id = MALIDP_CORE_ID_PRODUCT_ID(chip->core_id);
627
628	switch (product_id) {
629	case MALIDP_D71_PRODUCT_ID:
630	case MALIDP_D32_PRODUCT_ID:
631		funcs = &d71_chip_funcs;
632		break;
633	default:
634		DRM_ERROR("Unsupported product: 0x%x\n", product_id);
635		return NULL;
636	}
637
638	chip->arch_id	= malidp_read32(reg_base, GLB_ARCH_ID);
639	chip->core_info	= malidp_read32(reg_base, GLB_CORE_INFO);
640	chip->bus_width	= D71_BUS_WIDTH_16_BYTES;
641
642	return funcs;
643}
644