// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <drm/drm_managed.h>

#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_merge3d.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"

#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
		(e) && (e)->parent ? \
		(e)->parent->base.id : -1, \
		(e) && (e)->hw_intf ? \
		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)

#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
		(e) && (e)->parent ? \
		(e)->parent->base.id : -1, \
		(e) && (e)->hw_intf ? \
		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)

#define to_dpu_encoder_phys_vid(x) \
	container_of(x, struct dpu_encoder_phys_vid, base)
static bool dpu_encoder_phys_vid_is_master(
		struct dpu_encoder_phys *phys_enc)
{
	return phys_enc->split_role != ENC_ROLE_SLAVE;
}

static void drm_mode_to_intf_timing_params(
		const struct dpu_encoder_phys *phys_enc,
		const struct drm_display_mode *mode,
		struct dpu_hw_intf_timing_params *timing)
{
	memset(timing, 0, sizeof(*timing));

	if ((mode->htotal < mode->hsync_end)
			|| (mode->hsync_start < mode->hdisplay)
			|| (mode->vtotal < mode->vsync_end)
			|| (mode->vsync_start < mode->vdisplay)
			|| (mode->hsync_end < mode->hsync_start)
			|| (mode->vsync_end < mode->vsync_start)) {
		DPU_ERROR(
		    "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
				mode->hsync_start, mode->hsync_end,
				mode->htotal, mode->hdisplay);
		DPU_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
				mode->vsync_start, mode->vsync_end,
				mode->vtotal, mode->vdisplay);
		return;
	}

	/*
	 * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
	 *  Active Region      Front Porch   Sync   Back Porch
	 * <-----------------><------------><-----><----------->
	 * <- [hv]display --->
	 * <--------- [hv]sync_start ------>
	 * <----------------- [hv]sync_end ------->
	 * <---------------------------- [hv]total ------------->
	 */
	timing->width = mode->hdisplay;	/* active width */
	timing->height = mode->vdisplay;	/* active height */
	timing->xres = timing->width;
	timing->yres = timing->height;
	timing->h_back_porch = mode->htotal - mode->hsync_end;
	timing->h_front_porch = mode->hsync_start - mode->hdisplay;
	timing->v_back_porch = mode->vtotal - mode->vsync_end;
	timing->v_front_porch = mode->vsync_start - mode->vdisplay;
	timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
	timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
	timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
	timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
	timing->border_clr = 0;
	timing->underflow_clr = 0xff;
	timing->hsync_skew = mode->hskew;
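	/*
	 * Worked example with the standard CEA-861 1920x1080@60 mode
	 * (hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200):
	 * h_front_porch = 2008 - 1920 = 88, hsync_pulse_width = 2052 - 2008
	 * = 44, h_back_porch = 2200 - 2052 = 148.
	 */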

	/* DSI controller cannot handle active-low sync signals. */
	if (phys_enc->hw_intf->cap->type == INTF_DSI) {
		timing->hsync_polarity = 0;
		timing->vsync_polarity = 0;
	}

	/* For DP/eDP, shift the timings to align the active region to the bottom right */
	if (phys_enc->hw_intf->cap->type == INTF_DP) {
		timing->h_back_porch += timing->h_front_porch;
		timing->h_front_porch = 0;
		timing->v_back_porch += timing->v_front_porch;
		timing->v_front_porch = 0;
	}

	timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
	timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);

	/*
	 * For DP, divide the horizontal parameters by 2 when widebus is
	 * enabled: the bus carries two pixels per pixel clock, so the timing
	 * generator counts in two-pixel units.
	 */
	if (phys_enc->hw_intf->cap->type == INTF_DP && timing->wide_bus_en) {
		timing->width = timing->width >> 1;
		timing->xres = timing->xres >> 1;
		timing->h_back_porch = timing->h_back_porch >> 1;
		timing->h_front_porch = timing->h_front_porch >> 1;
		timing->hsync_pulse_width = timing->hsync_pulse_width >> 1;
	}
}

static u32 get_horizontal_total(const struct dpu_hw_intf_timing_params *timing)
{
	u32 active = timing->xres;
	u32 inactive =
	    timing->h_back_porch + timing->h_front_porch +
	    timing->hsync_pulse_width;
	return active + inactive;
}

static u32 get_vertical_total(const struct dpu_hw_intf_timing_params *timing)
{
	u32 active = timing->yres;
	u32 inactive =
	    timing->v_back_porch + timing->v_front_porch +
	    timing->vsync_pulse_width;
	return active + inactive;
}

/*
 * programmable_fetch_get_num_lines:
 *	Number of fetch lines in the vertical front porch
 * @phys_enc: Pointer to the physical encoder
 * @timing: Pointer to the intf timing information for the requested mode
 *
 * Returns the number of fetch lines in the vertical front porch at which mdp
 * can start fetching the next frame.
 *
 * The number of needed prefetch lines is anything that cannot be absorbed in
 * the start-of-frame time (back porch + vsync pulse width).
 *
 * Some panels have a very large VFP; however, we only need a total number of
 * lines based on the chip's worst-case latencies.
 */
static u32 programmable_fetch_get_num_lines(
		struct dpu_encoder_phys *phys_enc,
		const struct dpu_hw_intf_timing_params *timing)
{
	u32 worst_case_needed_lines =
	    phys_enc->hw_intf->cap->prog_fetch_lines_worst_case;
	u32 start_of_frame_lines =
	    timing->v_back_porch + timing->vsync_pulse_width;
	u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
	u32 actual_vfp_lines = 0;

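	/*
	 * Illustrative numbers (assumed, not from any specific chip): with a
	 * worst case of 24 prefetch lines and vbp + vsw = 9 lines, 15 lines
	 * of vfp are needed. needed_vfp_lines only underflows when the check
	 * below takes the "not needed" branch, where it is never used.
	 */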
	/* Fetch must be outside active lines, otherwise undefined. */
	if (start_of_frame_lines >= worst_case_needed_lines) {
		DPU_DEBUG_VIDENC(phys_enc,
				"prog fetch is not needed, large vbp+vsw\n");
		actual_vfp_lines = 0;
	} else if (timing->v_front_porch < needed_vfp_lines) {
		/* Fetch is needed, but the panel config lacks enough porch */
		pr_warn_once("low vbp+vfp may lead to perf issues in some cases\n");
		DPU_DEBUG_VIDENC(phys_enc,
				"less vfp than fetch req, using entire vfp\n");
		actual_vfp_lines = timing->v_front_porch;
	} else {
		DPU_DEBUG_VIDENC(phys_enc, "room in vfp for needed prefetch\n");
		actual_vfp_lines = needed_vfp_lines;
	}

	DPU_DEBUG_VIDENC(phys_enc,
		"v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
		timing->v_front_porch, timing->v_back_porch,
		timing->vsync_pulse_width);
	DPU_DEBUG_VIDENC(phys_enc,
		"wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
		worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);

	return actual_vfp_lines;
}

/*
 * programmable_fetch_config: Programs HW to prefetch lines by offsetting
 *	the start of fetch into the vertical front porch for cases where the
 *	vsync pulse width and vertical back porch time is insufficient
 *
 *	Gets the number of lines to prefetch, then calculates the VSYNC
 *	counter value. The HW layer requires the VSYNC counter of the first
 *	pixel of the target VFP line.
 *
 * @phys_enc: Pointer to the physical encoder
 * @timing: Pointer to the intf timing information for the requested mode
 */
static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
				      const struct dpu_hw_intf_timing_params *timing)
{
	struct dpu_hw_intf_prog_fetch f = { 0 };
	u32 vfp_fetch_lines = 0;
	u32 horiz_total = 0;
	u32 vert_total = 0;
	u32 vfp_fetch_start_vsync_counter = 0;
	unsigned long lock_flags;

	if (WARN_ON_ONCE(!phys_enc->hw_intf->ops.setup_prg_fetch))
		return;

	vfp_fetch_lines = programmable_fetch_get_num_lines(phys_enc, timing);
	if (vfp_fetch_lines) {
		vert_total = get_vertical_total(timing);
		horiz_total = get_horizontal_total(timing);
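		/*
		 * The fetch start is a 1-based pixel-clock count from the
		 * start of the frame: begin fetching at the first pixel of
		 * the line that lies vfp_fetch_lines before the frame end.
		 */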
		vfp_fetch_start_vsync_counter =
		    (vert_total - vfp_fetch_lines) * horiz_total + 1;
		f.enable = 1;
		f.fetch_start = vfp_fetch_start_vsync_counter;
	}

	DPU_DEBUG_VIDENC(phys_enc,
		"vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
		vfp_fetch_lines, vfp_fetch_start_vsync_counter);

	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
	phys_enc->hw_intf->ops.setup_prg_fetch(phys_enc->hw_intf, &f);
	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
}

static void dpu_encoder_phys_vid_setup_timing_engine(
		struct dpu_encoder_phys *phys_enc)
{
	struct drm_display_mode mode;
	struct dpu_hw_intf_timing_params timing_params = { 0 };
	const struct dpu_format *fmt = NULL;
	u32 fmt_fourcc;
	unsigned long lock_flags;
	struct dpu_hw_intf_cfg intf_cfg = { 0 };

	drm_mode_init(&mode, &phys_enc->cached_mode);

	if (!phys_enc->hw_ctl->ops.setup_intf_cfg) {
		DPU_ERROR("invalid hw_ctl: setup_intf_cfg not supported\n");
		return;
	}

	if (!phys_enc->hw_intf->ops.setup_timing_gen) {
		DPU_ERROR("timing engine setup is not supported\n");
		return;
	}

	DPU_DEBUG_VIDENC(phys_enc, "enabling mode:\n");
	drm_mode_debug_printmodeline(&mode);

	fmt_fourcc = dpu_encoder_get_drm_fmt(phys_enc);

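	/*
	 * In split topologies each interface drives half of the mode, and
	 * YUV420 output likewise programs the timing engine with halved
	 * horizontal parameters.
	 */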
	if (phys_enc->split_role != ENC_ROLE_SOLO || fmt_fourcc == DRM_FORMAT_YUV420) {
		mode.hdisplay >>= 1;
		mode.htotal >>= 1;
		mode.hsync_start >>= 1;
		mode.hsync_end >>= 1;
		mode.hskew >>= 1;

		DPU_DEBUG_VIDENC(phys_enc,
			"split_role %d, halve horizontal %d %d %d %d %d\n",
			phys_enc->split_role,
			mode.hdisplay, mode.htotal,
			mode.hsync_start, mode.hsync_end,
			mode.hskew);
	}

	drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params);

	fmt = dpu_get_dpu_format(fmt_fourcc);
	DPU_DEBUG_VIDENC(phys_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);

	if (phys_enc->hw_cdm)
		intf_cfg.cdm = phys_enc->hw_cdm->idx;
	intf_cfg.intf = phys_enc->hw_intf->idx;
	intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
	intf_cfg.stream_sel = 0; /* Don't-care value for video mode */
	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
	intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
	if (phys_enc->hw_pp->merge_3d)
		intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;

	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
	phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
			&timing_params, fmt);
	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);

	/* Set up which pingpong block will connect to this intf */
	if (phys_enc->hw_intf->ops.bind_pingpong_blk)
		phys_enc->hw_intf->ops.bind_pingpong_blk(
				phys_enc->hw_intf,
				phys_enc->hw_pp->idx);

	if (phys_enc->hw_pp->merge_3d)
		phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
							     intf_cfg.mode_3d);

	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);

	programmable_fetch_config(phys_enc, &timing_params);
}

static void dpu_encoder_phys_vid_vblank_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;
	struct dpu_hw_ctl *hw_ctl;
	unsigned long lock_flags;
	u32 flush_register = 0;

	hw_ctl = phys_enc->hw_ctl;

	DPU_ATRACE_BEGIN("vblank_irq");

	dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);

	/*
	 * Only decrement the pending flush count if we've actually flushed
	 * hardware. Due to sw irq latency, vblank may have already happened,
	 * so we need to double-check with hw that it accepted the flush bits.
	 */
	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
	if (hw_ctl->ops.get_flush_register)
		flush_register = hw_ctl->ops.get_flush_register(hw_ctl);

	if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
		atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);

	/* Signal any waiting atomic commit thread */
	wake_up_all(&phys_enc->pending_kickoff_wq);

	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
			DPU_ENCODER_FRAME_EVENT_DONE);

	DPU_ATRACE_END("vblank_irq");
}

static void dpu_encoder_phys_vid_underrun_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;

	dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
}

static bool dpu_encoder_phys_vid_needs_single_flush(
		struct dpu_encoder_phys *phys_enc)
{
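	/*
	 * Dual-intf (split) topologies share a single CTL, so one flush
	 * covers both interfaces.
	 */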
	return phys_enc->split_role != ENC_ROLE_SOLO;
}

static int dpu_encoder_phys_vid_control_vblank_irq(
		struct dpu_encoder_phys *phys_enc,
		bool enable)
{
	int ret = 0;
	int refcount;

	mutex_lock(&phys_enc->vblank_ctl_lock);
	refcount = phys_enc->vblank_refcount;

	/* Slave encoders don't report vblank */
	if (!dpu_encoder_phys_vid_is_master(phys_enc))
		goto end;

	/* Protect against the refcount going negative */
	if (!enable && refcount == 0) {
		ret = -EINVAL;
		goto end;
	}

	DRM_DEBUG_VBL("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
		      refcount);

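	/*
	 * Register the vblank callback on the first reference and unregister
	 * it on the last; the refcount is only adjusted when the core irq
	 * call succeeds.
	 */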
	if (enable) {
		if (phys_enc->vblank_refcount == 0)
			ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_VSYNC],
					dpu_encoder_phys_vid_vblank_irq,
					phys_enc);
		if (!ret)
			phys_enc->vblank_refcount++;
	} else {
		if (phys_enc->vblank_refcount == 1)
			ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_VSYNC]);
		if (!ret)
			phys_enc->vblank_refcount--;
	}

end:
	mutex_unlock(&phys_enc->vblank_ctl_lock);
	if (ret) {
		DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
			  DRMID(phys_enc->parent),
			  phys_enc->hw_intf->idx - INTF_0, ret, enable,
			  refcount);
	}
	return ret;
}

static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;
	const struct dpu_format *fmt;
	u32 fmt_fourcc;

	ctl = phys_enc->hw_ctl;
	fmt_fourcc = dpu_encoder_get_drm_fmt(phys_enc);
	fmt = dpu_get_dpu_format(fmt_fourcc);

	DPU_DEBUG_VIDENC(phys_enc, "\n");

	if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
		return;

	dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);

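	/* Configure the CDM (chroma down-sampling) block for YUV output, if needed */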
	dpu_encoder_helper_phys_setup_cdm(phys_enc, fmt, CDM_CDWN_OUTPUT_HDMI);

	dpu_encoder_phys_vid_setup_timing_engine(phys_enc);

	/*
	 * For single flush cases (dual-ctl or pp-split), skip setting the
	 * flush bit for the slave intf, since both intfs use the same ctl
	 * and HW will only flush the master.
	 */
	if (dpu_encoder_phys_vid_needs_single_flush(phys_enc) &&
		!dpu_encoder_phys_vid_is_master(phys_enc))
		goto skip_flush;

	ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
	if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d)
		ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx);

	if (ctl->ops.update_pending_flush_cdm && phys_enc->hw_cdm)
		ctl->ops.update_pending_flush_cdm(ctl, phys_enc->hw_cdm->idx);

	/*
	 * Peripheral flush must be updated whenever flushing SDP packets is needed.
	 * SDP packets are required for any YUV format (YUV420, YUV422, YUV444).
	 */
	if (ctl->ops.update_pending_flush_periph && dpu_encoder_needs_periph_flush(phys_enc))
		ctl->ops.update_pending_flush_periph(ctl, phys_enc->hw_intf->idx);

skip_flush:
	DPU_DEBUG_VIDENC(phys_enc,
		"update pending flush ctl %d intf %d\n",
		ctl->idx - CTL_0, phys_enc->hw_intf->idx);

	atomic_set(&phys_enc->underrun_cnt, 0);

	/* ctl_flush & timing engine enable will be triggered by framework */
	if (phys_enc->enable_state == DPU_ENC_DISABLED)
		phys_enc->enable_state = DPU_ENC_ENABLING;
}

static int dpu_encoder_phys_vid_wait_for_tx_complete(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_wait_info wait_info;
	int ret;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	if (!dpu_encoder_phys_vid_is_master(phys_enc))
		return 0;

	/* Wait for kickoff to complete */
	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_VSYNC],
			dpu_encoder_phys_vid_vblank_irq,
			&wait_info);

	if (ret == -ETIMEDOUT)
		dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);

	return ret;
}

static int dpu_encoder_phys_vid_wait_for_commit_done(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl;
	int ret;

	if (!hw_ctl)
		return 0;

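	/*
	 * The vblank interrupt handler wakes pending_kickoff_wq; the flush
	 * register reads back 0 once the hardware has latched every pending
	 * flush bit, i.e. the committed configuration has taken effect.
	 */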
	ret = wait_event_timeout(phys_enc->pending_kickoff_wq,
		(hw_ctl->ops.get_flush_register(hw_ctl) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0) {
		DPU_ERROR("vblank timeout: %x\n", hw_ctl->ops.get_flush_register(hw_ctl));
		return -ETIMEDOUT;
	}

	return 0;
}

static void dpu_encoder_phys_vid_prepare_for_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;
	int rc;
	struct drm_encoder *drm_enc;

	drm_enc = phys_enc->parent;

	ctl = phys_enc->hw_ctl;
	if (!ctl->ops.wait_reset_status)
		return;

	/*
	 * HW supports hardware-initiated ctl reset, so before we kick off a
	 * new frame, we need to check for and wait on the completion of any
	 * hw-initiated ctl reset.
	 */
	rc = ctl->ops.wait_reset_status(ctl);
	if (rc) {
		DPU_ERROR_VIDENC(phys_enc, "ctl %d reset failure: %d\n",
				ctl->idx, rc);
		msm_disp_snapshot_state(drm_enc->dev);
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_VSYNC]);
	}
}

static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
{
	unsigned long lock_flags;
	int ret;
	struct dpu_hw_intf_status intf_status = {0};

	if (!phys_enc->parent || !phys_enc->parent->dev) {
		DPU_ERROR("invalid encoder/device\n");
		return;
	}

	if (!phys_enc->hw_intf) {
		DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
				phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
		return;
	}

	if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
		return;

	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DPU_ERROR("already disabled\n");
		return;
	}

	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
	phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0);
	if (dpu_encoder_phys_vid_is_master(phys_enc))
		dpu_encoder_phys_inc_pending(phys_enc);
	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);

	/*
	 * Wait for a vsync so we know that ENABLE=0 latched before the
	 * (connector) source of the vsyncs gets disabled. Otherwise we end up
	 * in a funny state if we re-enable before the disable latches, with
	 * the result that some of the settings changes for the new modeset
	 * (like a new scanout buffer) don't latch properly.
	 */
	if (dpu_encoder_phys_vid_is_master(phys_enc)) {
		ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc);
		if (ret) {
			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
			DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
				  DRMID(phys_enc->parent),
				  phys_enc->hw_intf->idx - INTF_0, ret);
		}
	}

	if (phys_enc->hw_intf && phys_enc->hw_intf->ops.get_status)
		phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &intf_status);

	/*
	 * Wait for a vsync if the timing engine status still reads as
	 * enabled after the timing engine has been disabled.
	 */
	if (intf_status.is_en && dpu_encoder_phys_vid_is_master(phys_enc)) {
		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
		dpu_encoder_phys_inc_pending(phys_enc);
		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
		ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc);
		if (ret) {
			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
			DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
				  DRMID(phys_enc->parent),
				  phys_enc->hw_intf->idx - INTF_0, ret);
		}
	}

	dpu_encoder_helper_phys_cleanup(phys_enc);
	phys_enc->enable_state = DPU_ENC_DISABLED;
}

static void dpu_encoder_phys_vid_handle_post_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	unsigned long lock_flags;

	/*
	 * Video mode must flush CTL before enabling the timing engine, so
	 * video encoders need to turn on their interfaces now.
	 */
	if (phys_enc->enable_state == DPU_ENC_ENABLING) {
		trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
				    phys_enc->hw_intf->idx - INTF_0);
		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
		phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1);
		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
		phys_enc->enable_state = DPU_ENC_ENABLED;
	}
}

static void dpu_encoder_phys_vid_irq_enable(struct dpu_encoder_phys *phys_enc)
{
	int ret;

	trace_dpu_enc_phys_vid_irq_enable(DRMID(phys_enc->parent),
					  phys_enc->hw_intf->idx - INTF_0,
					  phys_enc->vblank_refcount);

	ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
	if (WARN_ON(ret))
		return;

	dpu_core_irq_register_callback(phys_enc->dpu_kms,
				       phys_enc->irq[INTR_IDX_UNDERRUN],
				       dpu_encoder_phys_vid_underrun_irq,
				       phys_enc);
}

static void dpu_encoder_phys_vid_irq_disable(struct dpu_encoder_phys *phys_enc)
{
	trace_dpu_enc_phys_vid_irq_disable(DRMID(phys_enc->parent),
					   phys_enc->hw_intf->idx - INTF_0,
					   phys_enc->vblank_refcount);

	dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
	dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
					 phys_enc->irq[INTR_IDX_UNDERRUN]);
}

static int dpu_encoder_phys_vid_get_line_count(
		struct dpu_encoder_phys *phys_enc)
{
	if (!dpu_encoder_phys_vid_is_master(phys_enc))
		return -EINVAL;

	if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count)
		return -EINVAL;

	return phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf);
}

static int dpu_encoder_phys_vid_get_frame_count(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_intf_status s = {0};
	u32 fetch_start = 0;
	struct drm_display_mode mode;

	drm_mode_init(&mode, &phys_enc->cached_mode);

	if (!dpu_encoder_phys_vid_is_master(phys_enc))
		return -EINVAL;

	if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_status)
		return -EINVAL;

	phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &s);

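	/*
	 * With programmable fetch enabled, the hardware starts fetching the
	 * next frame during the current vertical front porch, so the frame
	 * counter effectively lags by one once the line count passes the
	 * fetch start point.
	 */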
	if (s.is_prog_fetch_en && s.is_en) {
		fetch_start = mode.vtotal - (mode.vsync_start - mode.vdisplay);
		if ((s.line_count > fetch_start) &&
			(s.line_count <= mode.vtotal))
			return s.frame_count + 1;
	}

	return s.frame_count;
}

static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
{
	ops->is_master = dpu_encoder_phys_vid_is_master;
	ops->enable = dpu_encoder_phys_vid_enable;
	ops->disable = dpu_encoder_phys_vid_disable;
	ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
	ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done;
	ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_tx_complete;
	ops->irq_enable = dpu_encoder_phys_vid_irq_enable;
	ops->irq_disable = dpu_encoder_phys_vid_irq_disable;
	ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
	ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
	ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
	ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
	ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count;
}

struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
		struct dpu_enc_phys_init_params *p)
{
	struct dpu_encoder_phys *phys_enc = NULL;

	if (!p) {
		DPU_ERROR("failed to create encoder due to invalid parameter\n");
		return ERR_PTR(-EINVAL);
	}

	phys_enc = drmm_kzalloc(dev, sizeof(*phys_enc), GFP_KERNEL);
	if (!phys_enc) {
		DPU_ERROR("failed to create encoder due to memory allocation error\n");
		return ERR_PTR(-ENOMEM);
	}

	DPU_DEBUG_VIDENC(phys_enc, "\n");

	dpu_encoder_phys_init(phys_enc, p);
	mutex_init(&phys_enc->vblank_ctl_lock);
	phys_enc->vblank_refcount = 0;

	dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
	phys_enc->intf_mode = INTF_MODE_VIDEO;
	phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync;
	phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;

	DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->hw_intf->idx);

	return phys_enc;
}