// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/sort.h>

#include <drm/drm_atomic.h>
#include <drm/drm_blend.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "mdp5_kms.h"
#include "msm_gem.h"

#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

struct mdp5_crtc {
	struct drm_crtc base;
	int id;
	bool enabled;

	spinlock_t lm_lock;     /* protect REG_MDP5_LM_* registers */

	/* if there is a pending flip, this will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits that were flushed at the last commit, used to decide
	 * if a vsync has happened since the last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	struct completion pp_completion;

	bool lm_cursor_enabled;

	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */
		spinlock_t lock;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint64_t iova;
		uint32_t width, height;
		int x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
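	/* arm the vblank irq so the pending bits get serviced at the
	 * next vblank:
	 */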
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	reinit_completion(&mdp5_crtc->pp_completion);
}

static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	bool start = !mdp5_cstate->defer_start;

	mdp5_cstate->defer_start = false;

	DBG("%s: flush=%08x", crtc->name, flush_mask);

	return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
}

/*
 * Flush updates, to make sure the hw is updated to the new scanout fb,
 * so that we can safely queue an unref of the current fb (i.e. at the
 * next vblank we know the hw is done with the previous scanout_fb).
 */
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_hw_mixer *mixer, *r_mixer;
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* this should not happen: */
	if (WARN_ON(!mdp5_cstate->ctl))
		return 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (!plane->state->visible)
			continue;
		flush_mask |= mdp5_plane_get_flush(plane);
	}

	mixer = mdp5_cstate->pipeline.mixer;
	flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);

	r_mixer = mdp5_cstate->pipeline.r_mixer;
	if (r_mixer)
		flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	return crtc_flush(crtc, flush_mask);
}

/* if file != NULL, this is the potential cancel-flip path from preclose */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		mdp5_crtc->event = NULL;
		DBG("%s: send event: %p", crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (ctl && !crtc->state->enable) {
		/* set STAGE_UNUSED for all layers */
		mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
		/* XXX: What to do here? */
		/* mdp5_crtc->ctl = NULL; */
	}
}

static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
	struct msm_kms *kms = &mdp5_kms->base.base;

	msm_gem_unpin_iova(val, kms->aspace);
	drm_gem_object_put(val);
}

static void mdp5_crtc_flip_cleanup(struct drm_device *dev, void *ptr)
{
	struct mdp5_crtc *mdp5_crtc = ptr;

	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);
}

static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
{
	switch (stage) {
	case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
	case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
	case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
	case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
	case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
	case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
	case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
	default:
		return 0;
	}
}

/*
 * left/right pipe offsets for the stage array used in blend_setup()
 */
#define PIPE_LEFT	0
#define PIPE_RIGHT	1

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	uint32_t lm = mixer->lm;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	int i, plane_cnt = 0;
	bool bg_alpha_enabled = false;
	u32 mixer_op_mode = 0;
	u32 val;
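	/* blender() maps a global mixer stage id (STAGE0..STAGE6) to the
	 * per-LM blender index (0..6) used to address the blend registers:
	 */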
#define blender(stage)	((stage) - STAGE0)

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	/* XXX: Can this happen now? */
	if (!ctl)
		goto out;

	/* Collect all plane information */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp5_pipe right_pipe;

		if (!plane->state->visible)
			continue;

		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
		/*
		 * if we have a right mixer, stage the same pipe as we
		 * have on the left mixer
		 */
		if (r_mixer)
			r_stage[pstate->stage][PIPE_LEFT] =
						mdp5_plane_pipe(plane);
		/*
		 * if we have a right pipe (i.e. the plane comprises two
		 * hwpipes), then stage the right pipe on the right side of
		 * both the layer mixers
		 */
		right_pipe = mdp5_plane_right_pipe(plane);
		if (right_pipe) {
			stage[pstate->stage][PIPE_RIGHT] = right_pipe;
			r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
		}

		plane_cnt++;
	}

	if (!pstates[STAGE_BASE]) {
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	} else if (plane_cnt) {
		format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

		if (format->alpha_enable)
			bg_alpha_enabled = true;
	}

	/* Program the blend configuration for each used stage */
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;

		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		fg_alpha = pstates[i]->base.alpha >> 8;
		bg_alpha = 0xFF - fg_alpha;

		if (!format->alpha_enable && bg_alpha_enabled)
			mixer_op_mode = 0;
		else
			mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

		if (format->alpha_enable &&
		    pstates[i]->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable &&
			   pstates[i]->base.pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
				       MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}
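		/* In both modes BG ends up scaled by (1 - FG alpha):
		 * premultiplied FG is passed through scaled only by the
		 * constant plane alpha, while coverage FG is also scaled
		 * by its own per-pixel alpha.
		 */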

		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
		if (r_mixer) {
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
					blender(i)), blend_op);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
					blender(i)), fg_alpha);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
					blender(i)), bg_alpha);
		}
	}

	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
		   val | mixer_op_mode);
	if (r_mixer) {
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
			   val | mixer_op_mode);
	}

	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
		       ctl_blend_flags);
out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
	struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
	uint32_t lm = mixer->lm;
	u32 mixer_width, val;
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode));

	mixer_width = mode->hdisplay;
	if (r_mixer)
		mixer_width /= 2;

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
			MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

	/* Assign mixer to LEFT side in source split mode */
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

	if (r_mixer) {
		u32 r_lm = r_mixer->lm;

		mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
			   MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

		/* Assign mixer to RIGHT side in source split mode */
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
	}

	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}

static bool mdp5_crtc_get_scanout_position(struct drm_crtc *crtc,
					   bool in_vblank_irq,
					   int *vpos, int *hpos,
					   ktime_t *stime, ktime_t *etime,
					   const struct drm_display_mode *mode)
{
	unsigned int pipe = crtc->index;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */
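	/*
	 * For example, with vsw = 2, vbp = 10 and crtc_vdisplay = 1080,
	 * vactive_start is 13 and counter values 13..1092 map to vpos
	 * 0..1079.
	 */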

	vactive_start = vsw + vbp + 1;

	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = mdp5_encoder_get_linecount(encoder);

	if (line < vactive_start)
		line -= vactive_start;
	else if (line > vactive_end)
		line = line - vfp_end - vactive_start;
	else
		line -= vactive_start;

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}

static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder)
		return 0;

	return mdp5_encoder_get_framecount(encoder);
}

static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	DBG("%s", crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power is disabled */
	drm_crtc_vblank_off(crtc);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	pm_runtime_put_sync(dev);

	if (crtc->state->event && !crtc->state->active) {
		WARN_ON(mdp5_crtc->event);
		spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
	}

	mdp5_crtc->enabled = false;
}

static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
	u32 count;

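	/*
	 * DSI command mode has no free-running frame counter, so pass a
	 * max vblank count of 0 there to make the vblank core fall back
	 * to timestamp-based counting instead of the hw frame counter.
	 */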
	count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
	drm_crtc_set_max_vblank_count(crtc, count);

	drm_crtc_vblank_on(crtc);
}

static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;

	DBG("%s", crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	pm_runtime_get_sync(dev);

	if (mdp5_crtc->lm_cursor_enabled) {
		/*
		 * Restore LM cursor state, as it might have been lost
		 * with suspend:
		 */
		if (mdp5_crtc->cursor.iova) {
			unsigned long flags;

			spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
			mdp5_crtc_restore_cursor(crtc);
			spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, true);
		} else {
			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, false);
		}
	}

	/* Restore vblank irq handling after power is enabled */
	mdp5_crtc_vblank_on(crtc);

	mdp5_crtc_mode_set_nofb(crtc);

	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}

static int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
				    struct drm_crtc_state *new_crtc_state,
				    bool need_right_mixer)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_interface *intf;
	bool new_mixer = false;

	new_mixer = !pipeline->mixer;

	if ((need_right_mixer && !pipeline->r_mixer) ||
	    (!need_right_mixer && pipeline->r_mixer))
		new_mixer = true;

	if (new_mixer) {
		struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
		struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
		u32 caps;
		int ret;

		caps = MDP_LM_CAP_DISPLAY;
		if (need_right_mixer)
			caps |= MDP_LM_CAP_PAIR;

		ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
					&pipeline->mixer, need_right_mixer ?
					&pipeline->r_mixer : NULL);
		if (ret)
			return ret;

		ret = mdp5_mixer_release(new_crtc_state->state, old_mixer);
		if (ret)
			return ret;

		if (old_r_mixer) {
			ret = mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
			if (ret)
				return ret;

			if (!need_right_mixer)
				pipeline->r_mixer = NULL;
		}
	}

	/*
	 * these should have been already set up in the encoder's atomic
	 * check (called by drm_atomic_helper_check_modeset)
	 */
	intf = pipeline->intf;

	mdp5_cstate->err_irqmask = intf2err(intf->num);
	mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

	if ((intf->type == INTF_DSI) &&
	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
		mdp5_cstate->cmd_mode = true;
	} else {
		mdp5_cstate->pp_done_irqmask = 0;
		mdp5_cstate->cmd_mode = false;
	}

	return 0;
}

struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->base.normalized_zpos - pb->state->base.normalized_zpos;
}

/* is there a helper for this? */
static bool is_fullscreen(struct drm_crtc_state *cstate,
		struct drm_plane_state *pstate)
{
	return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
		((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
		((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}

static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
					struct drm_crtc_state *new_crtc_state,
					struct drm_plane_state *bpstate)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);

	/*
	 * if we're in source split mode, it's mandatory to have
	 * border out on the base stage
	 */
	if (mdp5_cstate->pipeline.r_mixer)
		return STAGE0;

	/* if the bottom-most layer is not fullscreen, we need to keep the
	 * base stage free for the solid border color:
	 */
	if (!is_fullscreen(new_crtc_state, bpstate))
		return STAGE0;

	return STAGE_BASE;
}

static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state);
	struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	const struct drm_plane_state *pstate;
	const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
	bool cursor_plane = false;
	bool need_right_mixer = false;
	int cnt = 0, i;
	int ret;
	enum mdp_mixer_stage_id start;

	DBG("%s: check", crtc->name);

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct mdp5_plane_state *mdp5_pstate =
				to_mdp5_plane_state(pstate);

		if (!pstate->visible)
			continue;

		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		mdp5_pstate->needs_dirtyfb =
			intf->mode == MDP5_INTF_DSI_MODE_COMMAND;

		/*
		 * if any plane on this crtc uses 2 hwpipes, then we need
		 * the crtc to have a right hwmixer.
		 */
		if (pstates[cnt].state->r_hwpipe)
			need_right_mixer = true;
		cnt++;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor_plane = true;
	}

	/* bail out early if there aren't any planes */
	if (!cnt)
		return 0;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/*
	 * we need a right hwmixer if the mode's width is greater than a single
	 * LM's max width
	 */
	if (mode->hdisplay > hw_cfg->lm.max_width)
		need_right_mixer = true;

	ret = mdp5_crtc_setup_pipeline(crtc, crtc_state, need_right_mixer);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
		return ret;
	}

	/* assign a stage based on sorted zpos property */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	/* trigger a warning if cursor isn't the highest zorder */
	WARN_ON(cursor_plane &&
		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

	start = get_start_stage(crtc, crtc_state, &pstates[0].state->base);

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
		DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
			cnt, start);
		return -EINVAL;
	}

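	/* e.g. with start == STAGE_BASE, three non-cursor planes sorted by
	 * zpos land on STAGE_BASE, STAGE0 and STAGE1; a cursor plane is
	 * always put on the topmost stage the mixer supports (lm.nb_stages):
	 */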
	for (i = 0; i < cnt; i++) {
		if (cursor_plane && (i == (cnt - 1)))
			pstates[i].state->stage = hw_cfg->lm.nb_stages;
		else
			pstates[i].state->stage = start + i;
		DBG("%s: assign pipe %s on stage=%d", crtc->name,
				pstates[i].plane->name,
				pstates[i].state->stage);
	}

	return 0;
}

static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	DBG("%s: begin", crtc->name);
}

static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	/* XXX: Can this happen now? */
	if (unlikely(!mdp5_cstate->ctl))
		return;

	blend_setup(crtc);

	/* PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before the FLUSH and START
	 * triggers, to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before the SW
	 * trigger in command mode.
	 */
	if (mdp5_cstate->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	/* XXX are we leaking out state here? */
	mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
	mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
	mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

	request_pending(crtc, PENDING_FLIP);
}

static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

	/*
	 * The cursor Region Of Interest (ROI) is the part of the cursor
	 * buffer that is read out and rendered. The ROI is determined by
	 * the visibility of the cursor point. In the default cursor image
	 * the cursor point is at the top left of the cursor image.
	 *
	 * Without rotation:
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
	 * width and ROI height need to be evaluated to crop the cursor image
	 * accordingly.
	 * (xres - x) will be the new cursor width when x > (xres - cursor.width)
	 * (yres - y) will be the new cursor height when y > (yres - cursor.height)
	 *
	 * With rotation:
	 * We get negative x and/or y coordinates.
	 * (cursor.width - abs(x)) will be the new cursor width when x < 0
	 * (cursor.height - abs(y)) will be the new cursor height when y < 0
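	 *
	 * For example, a 64x64 cursor at x == xres - 16 is cropped to
	 * roi_w = 16, while x == -16 gives roi_w = 64 - 16 = 48.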
	 */
	if (mdp5_crtc->cursor.x >= 0)
		*roi_w = min(mdp5_crtc->cursor.width, xres -
			mdp5_crtc->cursor.x);
	else
		*roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
	if (mdp5_crtc->cursor.y >= 0)
		*roi_h = min(mdp5_crtc->cursor.height, yres -
			mdp5_crtc->cursor.y);
	else
		*roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
}

static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ARGB8888);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t blendcfg, stride;
	uint32_t x, y, src_x, src_y, width, height;
	uint32_t roi_w, roi_h;
	int lm;

	assert_spin_locked(&mdp5_crtc->cursor.lock);

	lm = mdp5_cstate->pipeline.mixer->lm;

	x = mdp5_crtc->cursor.x;
	y = mdp5_crtc->cursor.y;
	width = mdp5_crtc->cursor.width;
	height = mdp5_crtc->cursor.height;

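	/* ARGB8888 is 4 bytes per pixel, so the scanout stride is width * 4 */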
	stride = width * info->cpp[0];

	get_roi(crtc, &roi_w, &roi_h);

	/* If the cursor buffer overlaps the upper or left screen border
	 * due to rotation, the pixel offset of the ROI inside the cursor
	 * buffer is the positive overlap distance.
	 */
	if (mdp5_crtc->cursor.x < 0) {
		src_x = abs(mdp5_crtc->cursor.x);
		x = 0;
	} else {
		src_x = 0;
	}
	if (mdp5_crtc->cursor.y < 0) {
		src_y = abs(mdp5_crtc->cursor.y);
		y = 0;
	} else {
		src_y = 0;
	}
	DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
		crtc->name, x, y, roi_w, roi_h, src_x, src_y);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
			MDP5_LM_CURSOR_START_XY_Y_START(y) |
			MDP5_LM_CURSOR_START_XY_X_START(x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
			MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
			MDP5_LM_CURSOR_XY_SRC_X(src_x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
			mdp5_crtc->cursor.iova);

	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
}

static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct platform_device *pdev = mdp5_kms->pdev;
	struct msm_kms *kms = &mdp5_kms->base.base;
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	struct mdp5_ctl *ctl;
	int ret;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	bool cursor_enable = true;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_set is deprecated with cursor planes\n");
		return -EINVAL;
	}

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	ctl = mdp5_cstate->ctl;
	if (!ctl)
		return -EINVAL;

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		cursor_enable = false;
		mdp5_crtc->cursor.iova = 0;
		pm_runtime_get_sync(&pdev->dev);
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(file, handle);
	if (!cursor_bo)
		return -ENOENT;

	ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
			&mdp5_crtc->cursor.iova);
	if (ret) {
		drm_gem_object_put(cursor_bo);
		return -EINVAL;
	}

	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	mdp5_crtc_restore_cursor(crtc);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
				cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	pm_runtime_put_sync(&pdev->dev);
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}
	return ret;
}

static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	struct drm_device *dev = crtc->dev;
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_move is deprecated with cursor planes\n");
		return -EINVAL;
	}

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	/* accept negative x/y coordinates up to maximum cursor overlap */
	mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
	mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);

	get_roi(crtc, &roi_w, &roi_h);

	pm_runtime_get_sync(&mdp5_kms->pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_crtc_restore_cursor(crtc);
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	pm_runtime_put_sync(&mdp5_kms->pdev->dev);

	return 0;
}

static void
mdp5_crtc_atomic_print_state(struct drm_printer *p,
			     const struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(state->crtc);

	if (WARN_ON(!pipeline))
		return;

	if (mdp5_cstate->ctl)
		drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));

	drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
			pipeline->mixer->name : "(null)");

	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
		drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
			   pipeline->r_mixer->name : "(null)");

	drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
}

static struct drm_crtc_state *
mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc->state))
		return NULL;

	mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
			      sizeof(*mdp5_cstate), GFP_KERNEL);
	if (!mdp5_cstate)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);

	return &mdp5_cstate->base;
}

static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(mdp5_cstate);
}

static void mdp5_crtc_reset(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate =
		kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);

	if (crtc->state)
		mdp5_crtc_destroy_state(crtc, crtc->state);

	if (mdp5_cstate)
		__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
	.get_vblank_counter = mdp5_crtc_get_vblank_counter,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
	.get_vblank_counter = mdp5_crtc_get_vblank_counter,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
	.atomic_enable = mdp5_crtc_atomic_enable,
	.atomic_disable = mdp5_crtc_atomic_disable,
	.get_scanout_position = mdp5_crtc_get_scanout_position,
};

static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}

static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
								pp_done);

	complete_all(&mdp5_crtc->pp_completion);
}

static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	int ret;

	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
						msecs_to_jiffies(50));
	if (ret == 0)
		dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
				     mdp5_cstate->pipeline.mixer->lm);
}

static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	int ret;

	/* Should not call this function if crtc is disabled. */
	if (!ctl)
		return;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

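	/*
	 * The hw clears the CTL flush bits once the double-buffered
	 * register updates have been latched at vblank, so wait until
	 * none of the bits flushed at the last commit are still pending.
	 */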
	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		((mdp5_ctl_get_commit_status(ctl) &
		mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	/* should this be done elsewhere? */
	mdp_irq_update(&mdp5_kms->base);

	mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}

struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return mdp5_cstate->ctl;
}

struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
		ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
}

struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return &mdp5_cstate->pipeline;
}

void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	if (mdp5_cstate->cmd_mode)
		mdp5_crtc_wait_for_pp_done(crtc);
	else
		mdp5_crtc_wait_for_flush_done(crtc);
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
				struct drm_plane *plane,
				struct drm_plane *cursor_plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;
	int ret;

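	/* with a dedicated drm cursor plane, the legacy cursor_set/
	 * cursor_move entry points are not exposed:
	 */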
	mdp5_crtc = drmm_crtc_alloc_with_planes(dev, struct mdp5_crtc, base,
						plane, cursor_plane,
						cursor_plane ?
						&mdp5_crtc_no_lm_cursor_funcs :
						&mdp5_crtc_funcs,
						NULL);
	if (IS_ERR(mdp5_crtc))
		return ERR_CAST(mdp5_crtc);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;

	spin_lock_init(&mdp5_crtc->lm_lock);
	spin_lock_init(&mdp5_crtc->cursor.lock);
	init_completion(&mdp5_crtc->pp_completion);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
	mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;

	mdp5_crtc->lm_cursor_enabled = !cursor_plane;

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);
	ret = drmm_add_action_or_reset(dev, mdp5_crtc_flip_cleanup, mdp5_crtc);
	if (ret)
		return ERR_PTR(ret);

	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);

	return crtc;
}