// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "mdp4_kms.h"
#include "msm_gem.h"

struct mdp4_crtc {
	struct drm_crtc base;
	char name[8];
	int id;
	int ovlp;
	enum mdp4_dma dma;
	bool enabled;

	/* which mixer/encoder we route output to: */
	int mixer;

	struct {
		spinlock_t lock;
		bool stale;
		uint32_t width, height;
		uint32_t x, y;

		/* next cursor to scan-out: */
		uint32_t next_iova;
		struct drm_gem_object *next_bo;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
	} cursor;

	/* if there is a pending flip, this will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits that were flushed at the last commit, used to decide
	 * whether a vsync has happened since the last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
};
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)

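/* look up the mdp4_kms instance that owns this crtc: */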
static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp4_kms(to_mdp_kms(priv->kms));
}

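/* mark pending work (cursor update and/or flip completion) and make sure
 * the vblank irq is registered so the work gets picked up on the next
 * vblank:
 */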
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	atomic_or(pending, &mdp4_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
}

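/* flush updates for all pipes attached to this crtc plus the overlay
 * engine itself, and remember the flush bits so we can later tell when
 * the flush has been taken (see mdp4_crtc_wait_for_flush_done()):
 */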
static void crtc_flush(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	uint32_t flush = 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		flush |= pipe2flush(pipe_id);
	}

	flush |= ovlp2flush(mdp4_crtc->ovlp);

	DBG("%s: flush=%08x", mdp4_crtc->name, flush);

	mdp4_crtc->flushed_mask = flush;

	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}

/* if file != NULL, this is the preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp4_crtc->event;
	if (event) {
		mdp4_crtc->event = NULL;
		DBG("%s: send event: %p", mdp4_crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

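/* flip-work callback: drop the iova pin and the gem reference that were
 * taken when the cursor bo started scanning out (runs from the flip-work
 * queue, not from irq context):
 */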
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp4_crtc *mdp4_crtc =
		container_of(work, struct mdp4_crtc, unref_cursor_work);
	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
	struct msm_kms *kms = &mdp4_kms->base.base;

	msm_gem_unpin_iova(val, kms->aspace);
	drm_gem_object_put(val);
}

/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
		[VG1]  = 1,
		[VG2]  = 2,
		[RGB1] = 0,
		[RGB2] = 0,
		[RGB3] = 0,
		[VG3]  = 3,
		[VG4]  = 4,
};

/* setup mixer config, for which we need to consider all CRTCs and
 * the planes attached to them
 *
 * TODO may possibly need some extra locking here
 */
static void setup_mixer(struct mdp4_kms *mdp4_kms)
{
	struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
	struct drm_crtc *crtc;
	uint32_t mixer_cfg = 0;
	static const enum mdp_mixer_stage_id stages[] = {
			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
	};

	list_for_each_entry(crtc, &config->crtc_list, head) {
		struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
		struct drm_plane *plane;

		drm_atomic_crtc_for_each_plane(plane, crtc) {
			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
			int idx = idxs[pipe_id];
			mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
					pipe_id, stages[idx]);
		}
	}

	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}

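/* program blending for each mixer stage: use per-pixel alpha when the
 * plane's framebuffer format has an alpha channel, otherwise fall back
 * to constant alpha, then refresh the global layer-mixer config:
 */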
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	int i, ovlp = mdp4_crtc->ovlp;
	bool alpha[4] = { false, false, false, false };

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		int idx = idxs[pipe_id];
		if (idx > 0) {
			const struct mdp_format *format =
					to_mdp_format(msm_framebuffer_format(plane->state->fb));
			alpha[idx-1] = format->alpha_enable;
		}
	}

	for (i = 0; i < 4; i++) {
		uint32_t op;

		if (alpha[i]) {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
		} else {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
		}

		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
	}

	setup_mixer(mdp4_kms);
}

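/* program DMA source/destination and overlay dimensions from the adjusted
 * mode; the scanout data comes from the attached pipe(s), so DMA src base
 * and stride are left at zero:
 */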
static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	int ovlp = mdp4_crtc->ovlp;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: " DRM_MODE_FMT,
			mdp4_crtc->name, DRM_MODE_ARG(mode));

	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));

	/* take data from pipe: */
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
			MDP4_DMA_DST_SIZE_WIDTH(0) |
			MDP4_DMA_DST_SIZE_HEIGHT(0));

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
			MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
			MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);

	if (dma == DMA_E) {
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
	}
}

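/* shut the crtc down: stop vblank handling, drop the error irq, release
 * our power reference, and deliver any event userspace is still waiting
 * on for this disable:
 */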
static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	unsigned long flags;

	DBG("%s", mdp4_crtc->name);

	if (WARN_ON(!mdp4_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power is disabled */
	drm_crtc_vblank_off(crtc);

	mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
	mdp4_disable(mdp4_kms);

	if (crtc->state->event && !crtc->state->active) {
		WARN_ON(mdp4_crtc->event);
		spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags);
	}

	mdp4_crtc->enabled = false;
}

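/* bring the crtc up: take a power reference, re-enable vblank handling
 * and the error irq, and flush the current configuration to hardware:
 */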
static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	DBG("%s", mdp4_crtc->name);

	if (WARN_ON(mdp4_crtc->enabled))
		return;

	mdp4_enable(mdp4_kms);

	/* Restore vblank irq handling after power is enabled */
	drm_crtc_vblank_on(crtc);

	mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);

	crtc_flush(crtc);

	mdp4_crtc->enabled = true;
}

static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: check", mdp4_crtc->name);
	// TODO anything else to check?
	return 0;
}

static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: begin", mdp4_crtc->name);
}

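/* commit point: stash the pending vblank event, reprogram blending, kick
 * off the overlay flush, and arm the vblank irq so the event is sent once
 * the flush has actually taken effect:
 */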
static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);

	WARN_ON(mdp4_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp4_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	blend_setup(crtc);
	crtc_flush(crtc);
	request_pending(crtc, PENDING_FLIP);
}

#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64

/* called from IRQ to update cursor related registers (if needed).  The
 * cursor registers, other than x/y position, appear not to be double
 * buffered, and changing them other than from vblank seems to trigger
 * underflow.
 */
static void update_cursor(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct msm_kms *kms = &mdp4_kms->base.base;
	enum mdp4_dma dma = mdp4_crtc->dma;
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	if (mdp4_crtc->cursor.stale) {
		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
		uint64_t iova = mdp4_crtc->cursor.next_iova;

		if (next_bo) {
			/* take an obj ref + iova ref when we start scanning out: */
			drm_gem_object_get(next_bo);
			msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);

			/* enable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
					MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
					MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
					MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
		} else {
			/* disable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
					mdp4_kms->blank_cursor_iova);
		}

		/* and drop the iova ref + obj ref when done scanning out: */
		if (prev_bo)
			drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);

		mdp4_crtc->cursor.scanout_bo = next_bo;
		mdp4_crtc->cursor.stale = false;
	}

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
			MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
			MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));

	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}

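/* legacy cursor_set hook: validate the requested size, look up and pin the
 * new cursor bo, then record it as the "next" cursor so update_cursor()
 * latches it at the following vblank:
 */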
static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file_priv, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct msm_kms *kms = &mdp4_kms->base.base;
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *cursor_bo, *old_bo;
	unsigned long flags;
	uint64_t iova;
	int ret;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (handle) {
		cursor_bo = drm_gem_object_lookup(file_priv, handle);
		if (!cursor_bo)
			return -ENOENT;
	} else {
		cursor_bo = NULL;
	}

	if (cursor_bo) {
		ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
		if (ret)
			goto fail;
	} else {
		iova = 0;
	}

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	old_bo = mdp4_crtc->cursor.next_bo;
	mdp4_crtc->cursor.next_bo   = cursor_bo;
	mdp4_crtc->cursor.next_iova = iova;
	mdp4_crtc->cursor.width     = width;
	mdp4_crtc->cursor.height    = height;
	mdp4_crtc->cursor.stale     = true;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	if (old_bo) {
		/* drop our previous reference: */
		drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
	}

	request_pending(crtc, PENDING_CURSOR);

	return 0;

fail:
	drm_gem_object_put(cursor_bo);
	return ret;
}

static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	mdp4_crtc->cursor.x = x;
	mdp4_crtc->cursor.y = y;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	crtc_flush(crtc);
	request_pending(crtc, PENDING_CURSOR);

	return 0;
}

static const struct drm_crtc_funcs mdp4_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.cursor_set = mdp4_crtc_cursor_set,
	.cursor_move = mdp4_crtc_cursor_move,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
};

static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
	.mode_set_nofb = mdp4_crtc_mode_set_nofb,
	.atomic_check = mdp4_crtc_atomic_check,
	.atomic_begin = mdp4_crtc_atomic_begin,
	.atomic_flush = mdp4_crtc_atomic_flush,
	.atomic_enable = mdp4_crtc_atomic_enable,
	.atomic_disable = mdp4_crtc_atomic_disable,
};

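/* vblank irq handler: one-shot per request_pending() call; completes any
 * pending flip, applies any stale cursor state, and queues the cursor
 * unref work on the driver workqueue:
 */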
static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);

	pending = atomic_xchg(&mdp4_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR) {
		update_cursor(crtc);
		drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
	}
}

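/* error irq handler: log the error status and re-flush the crtc: */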
static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
	crtc_flush(crtc);
}

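/* wait (with a 50ms timeout) until the flush bits written by crtc_flush()
 * read back as clear, i.e. until the last flush has been taken by the
 * hardware on a vblank:
 */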
static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	int ret;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		!(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) &
			mdp4_crtc->flushed_mask),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank timeout, crtc=%d\n", mdp4_crtc->id);

	mdp4_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	return mdp4_crtc->vblank.irqmask;
}

/* set dma config, i.e. the format the encoder wants. */
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
}

/* set interface for routing crtc->encoder: */
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	uint32_t intf_sel;

	intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);

	switch (mdp4_crtc->dma) {
	case DMA_P:
		intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
		break;
	case DMA_S:
		intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
		break;
	case DMA_E:
		intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
		break;
	}

	if (intf == INTF_DSI_VIDEO) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
	} else if (intf == INTF_DSI_CMD) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
	}

	mdp4_crtc->mixer = mixer;

	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);

	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}

void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	/* wait_for_flush_done is the only case for now.
	 * Later we will have command-mode CRTCs that need to
	 * wait for other events.
	 */
	mdp4_crtc_wait_for_flush_done(crtc);
}

static const char *dma_names[] = {
		"DMA_P", "DMA_S", "DMA_E",
};

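/* drm-managed release action: tear down the cursor-unref flip work when
 * the drm device is destroyed:
 */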
static void mdp4_crtc_flip_cleanup(struct drm_device *dev, void *ptr)
{
	struct mdp4_crtc *mdp4_crtc = ptr;

	drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
}

/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id, int ovlp_id,
		enum mdp4_dma dma_id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp4_crtc *mdp4_crtc;
	int ret;

	mdp4_crtc = drmm_crtc_alloc_with_planes(dev, struct mdp4_crtc, base,
						plane, NULL,
						&mdp4_crtc_funcs, NULL);
	if (IS_ERR(mdp4_crtc))
		return ERR_CAST(mdp4_crtc);

	crtc = &mdp4_crtc->base;

	mdp4_crtc->id = id;

	mdp4_crtc->ovlp = ovlp_id;
	mdp4_crtc->dma = dma_id;

	mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
	mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;

	mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
	mdp4_crtc->err.irq = mdp4_crtc_err_irq;

	snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
			dma_names[dma_id], ovlp_id);

	spin_lock_init(&mdp4_crtc->cursor.lock);

	drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);
	ret = drmm_add_action_or_reset(dev, mdp4_crtc_flip_cleanup, mdp4_crtc);
	if (ret)
		return ERR_PTR(ret);

	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);

	return crtc;
}