// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/delay.h>
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_pingpong.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"

#include <drm/drm_managed.h>

#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
		(e) && (e)->base.parent ? \
		(e)->base.parent->base.id : -1, \
		(e) ? (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)

#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
		(e) && (e)->base.parent ? \
		(e)->base.parent->base.id : -1, \
		(e) ? (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)

#define to_dpu_encoder_phys_cmd(x) \
	container_of(x, struct dpu_encoder_phys_cmd, base)

#define PP_TIMEOUT_MAX_TRIALS	10
/*
 * Tearcheck sync start and continue thresholds are empirically found
 * based on common panels. In the future, we may want to allow panels
 * to override these default values.
 */
#define DEFAULT_TEARCHECK_SYNC_THRESH_START	4
#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE	4

static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc);

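/*
 * Both ENC_ROLE_SOLO and ENC_ROLE_MASTER act as master; only the second
 * encoder of a split-panel pair is a slave.
 */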
static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
	return (phys_enc->split_role != ENC_ROLE_SLAVE);
}

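/*
 * Program the CTL path for command mode: select the interface, the 3D-merge
 * mode and the DSC configuration, bind the pingpong block to the interface
 * on active-CTL hardware, then program the command-mode specifics (data
 * compression, wide bus).
 */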
static void _dpu_encoder_phys_cmd_update_intf_cfg(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_intf_cfg intf_cfg = { 0 };
	struct dpu_hw_intf_cmd_mode_cfg cmd_mode_cfg = {};

	ctl = phys_enc->hw_ctl;
	if (!ctl->ops.setup_intf_cfg)
		return;

	intf_cfg.intf = phys_enc->hw_intf->idx;
	intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
	intf_cfg.stream_sel = cmd_enc->stream_sel;
	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
	intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);

	/* setup which pp blk will connect to this intf */
	if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) &&
	    phys_enc->hw_intf->ops.bind_pingpong_blk)
		phys_enc->hw_intf->ops.bind_pingpong_blk(
				phys_enc->hw_intf,
				phys_enc->hw_pp->idx);

	if (intf_cfg.dsc != 0)
		cmd_mode_cfg.data_compress = true;

	cmd_mode_cfg.wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);

	if (phys_enc->hw_intf->ops.program_intf_cmd_cfg)
		phys_enc->hw_intf->ops.program_intf_cmd_cfg(phys_enc->hw_intf, &cmd_mode_cfg);
}

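/*
 * PP_DONE interrupt handler: the frame has been pushed to the panel.
 * Signal frame-done to the parent encoder, decrement the pending kickoff
 * count (never below zero) and wake anyone blocked on the kickoff
 * waitqueue.
 */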
static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;
	unsigned long lock_flags;
	int new_cnt;
	u32 event = DPU_ENCODER_FRAME_EVENT_DONE;

	if (!phys_enc->hw_pp)
		return;

	DPU_ATRACE_BEGIN("pp_done_irq");
	/* notify all synchronous clients first, then asynchronous clients */
	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, event);

	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);

	trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
					  phys_enc->hw_pp->idx - PINGPONG_0,
					  new_cnt, event);

	/* Signal any waiting atomic commit thread */
	wake_up_all(&phys_enc->pending_kickoff_wq);
	DPU_ATRACE_END("pp_done_irq");
}

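/*
 * Read-pointer (TE) interrupt handler: this is the command-mode equivalent
 * of vblank. Forward it as a vblank event and release any thread waiting
 * on pending_vblank_wq.
 */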
static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;
	struct dpu_encoder_phys_cmd *cmd_enc;

	DPU_ATRACE_BEGIN("rd_ptr_irq");
	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);

	dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);

	atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
	wake_up_all(&cmd_enc->pending_vblank_wq);
	DPU_ATRACE_END("rd_ptr_irq");
}

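/*
 * CTL_START interrupt handler: the CTL has latched the new configuration.
 * Decrement the pending ctl-start count and wake waiters in
 * _dpu_encoder_phys_cmd_wait_for_ctl_start().
 */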
static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;

	DPU_ATRACE_BEGIN("ctl_start_irq");

	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);

	/* Signal any waiting ctl start interrupt */
	wake_up_all(&phys_enc->pending_kickoff_wq);
	DPU_ATRACE_END("ctl_start_irq");
}

static void dpu_encoder_phys_cmd_underrun_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;

	dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
}

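/*
 * Handle a PP_DONE timeout: log (rate limited), capture a device snapshot
 * for debugging, flag the encoder for a CTL reset before the next kickoff
 * and report the frame as errored. After PP_TIMEOUT_MAX_TRIALS consecutive
 * timeouts the panel is reported dead.
 */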
static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
	bool do_log = false;
	struct drm_encoder *drm_enc;

	if (!phys_enc->hw_pp)
		return -EINVAL;

	drm_enc = phys_enc->parent;

	cmd_enc->pp_timeout_report_cnt++;
	if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
		frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
		do_log = true;
	} else if (cmd_enc->pp_timeout_report_cnt == 1) {
		do_log = true;
	}

	trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(drm_enc),
		     phys_enc->hw_pp->idx - PINGPONG_0,
		     cmd_enc->pp_timeout_report_cnt,
		     atomic_read(&phys_enc->pending_kickoff_cnt),
		     frame_event);

	/* to avoid flooding, only log the first timeout and the "dead" one */
	if (do_log) {
		DRM_ERROR("id:%d pp:%d ctl:%d kickoff timeout cnt:%d koff_cnt:%d\n",
			  DRMID(drm_enc),
			  phys_enc->hw_pp->idx - PINGPONG_0,
			  phys_enc->hw_ctl->idx - CTL_0,
			  cmd_enc->pp_timeout_report_cnt,
			  atomic_read(&phys_enc->pending_kickoff_cnt));
		msm_disp_snapshot_state(drm_enc->dev);
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_RDPTR]);
	}

	atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);

	/* request a ctl reset before the next kickoff */
	phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;

	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, frame_event);

	return -ETIMEDOUT;
}

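/*
 * Block until the previously queued frame has been transferred, i.e. until
 * pending_kickoff_cnt drops via the PP_DONE interrupt, or escalate to the
 * timeout handler.
 */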
static int _dpu_encoder_phys_cmd_wait_for_idle(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_encoder_wait_info wait_info;
	int ret;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_PINGPONG],
			dpu_encoder_phys_cmd_pp_tx_done_irq,
			&wait_info);
	if (ret == -ETIMEDOUT)
		_dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
	else if (!ret)
		cmd_enc->pp_timeout_report_cnt = 0;

	return ret;
}

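/*
 * Refcounted enable/disable of the read-pointer (vblank) interrupt. Only
 * the master encoder registers the callback; slave encoders do not report
 * vblank.
 */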
static int dpu_encoder_phys_cmd_control_vblank_irq(
		struct dpu_encoder_phys *phys_enc,
		bool enable)
{
	int ret = 0;
	int refcount;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}

	mutex_lock(&phys_enc->vblank_ctl_lock);
	refcount = phys_enc->vblank_refcount;

	/* Slave encoders don't report vblank */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		goto end;

	/* protect against negative */
	if (!enable && refcount == 0) {
		ret = -EINVAL;
		goto end;
	}

	DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      enable ? "true" : "false", refcount);

	if (enable) {
		if (phys_enc->vblank_refcount == 0)
			ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_RDPTR],
					dpu_encoder_phys_cmd_te_rd_ptr_irq,
					phys_enc);
		if (!ret)
			phys_enc->vblank_refcount++;
	} else {
		if (phys_enc->vblank_refcount == 1)
			ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_RDPTR]);
		if (!ret)
			phys_enc->vblank_refcount--;
	}

end:
	mutex_unlock(&phys_enc->vblank_ctl_lock);
	if (ret) {
		DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
			  DRMID(phys_enc->parent),
			  phys_enc->hw_pp->idx - PINGPONG_0, ret,
			  enable ? "true" : "false", refcount);
	}

	return ret;
}

static void dpu_encoder_phys_cmd_irq_enable(struct dpu_encoder_phys *phys_enc)
{
	trace_dpu_enc_phys_cmd_irq_enable(DRMID(phys_enc->parent),
					  phys_enc->hw_pp->idx - PINGPONG_0,
					  phys_enc->vblank_refcount);

	phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;
	phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;

	if (phys_enc->has_intf_te)
		phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr;
	else
		phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;

	dpu_core_irq_register_callback(phys_enc->dpu_kms,
				       phys_enc->irq[INTR_IDX_PINGPONG],
				       dpu_encoder_phys_cmd_pp_tx_done_irq,
				       phys_enc);
	dpu_core_irq_register_callback(phys_enc->dpu_kms,
				       phys_enc->irq[INTR_IDX_UNDERRUN],
				       dpu_encoder_phys_cmd_underrun_irq,
				       phys_enc);
	dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);

	if (dpu_encoder_phys_cmd_is_master(phys_enc))
		dpu_core_irq_register_callback(phys_enc->dpu_kms,
					       phys_enc->irq[INTR_IDX_CTL_START],
					       dpu_encoder_phys_cmd_ctl_start_irq,
					       phys_enc);
}

static void dpu_encoder_phys_cmd_irq_disable(struct dpu_encoder_phys *phys_enc)
{
	trace_dpu_enc_phys_cmd_irq_disable(DRMID(phys_enc->parent),
					   phys_enc->hw_pp->idx - PINGPONG_0,
					   phys_enc->vblank_refcount);

	if (dpu_encoder_phys_cmd_is_master(phys_enc))
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
						 phys_enc->irq[INTR_IDX_CTL_START]);

	dpu_core_irq_unregister_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_UNDERRUN]);
	dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
	dpu_core_irq_unregister_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_PINGPONG]);

	phys_enc->irq[INTR_IDX_CTL_START] = 0;
	phys_enc->irq[INTR_IDX_PINGPONG] = 0;
	phys_enc->irq[INTR_IDX_RDPTR] = 0;
}

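/*
 * Program the tear check block so the hardware counts panel lines against
 * the "vsync" clock and synchronizes frame transfer to the panel TE signal.
 */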
static void dpu_encoder_phys_cmd_tearcheck_config(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_tear_check tc_cfg = { 0 };
	struct drm_display_mode *mode;
	bool tc_enable = true;
	unsigned long vsync_hz;
	struct dpu_kms *dpu_kms;

	/*
	 * TODO: if/when resource allocation is refactored, move this to a
	 * place where the driver can actually return an error.
	 */
	if (!phys_enc->has_intf_te &&
	    (!phys_enc->hw_pp ||
	     !phys_enc->hw_pp->ops.enable_tearcheck)) {
		DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "intf %d pp %d\n",
			 phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
			 phys_enc->hw_pp ? phys_enc->hw_pp->idx - PINGPONG_0 : -1);

	mode = &phys_enc->cached_mode;

	dpu_kms = phys_enc->dpu_kms;

	/*
	 * TE default: DSI byte clock calculated based on 70 fps;
	 * around 14 ms to complete a kickoff cycle if TE is disabled;
	 * vclk_line based on 60 fps; write is faster than read;
	 * init == start == rdptr.
	 *
	 * vsync_count is the ratio of the MDP VSYNC clock frequency to the
	 * LCD panel frequency divided by the number of rows (lines) in the
	 * LCD panel.
	 */
	vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
	if (!vsync_hz) {
		DPU_DEBUG_CMDENC(cmd_enc, "invalid - no vsync clock\n");
		return;
	}

	tc_cfg.vsync_count = vsync_hz /
				(mode->vtotal * drm_mode_vrefresh(mode));

	/*
	 * Set the sync_cfg_height to twice vtotal so that if we lose a
	 * TE event coming from the display TE pin we won't stall immediately
	 */
	tc_cfg.hw_vsync_mode = 1;
	tc_cfg.sync_cfg_height = mode->vtotal * 2;
	tc_cfg.vsync_init_val = mode->vdisplay;
	tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
	tc_cfg.start_pos = mode->vdisplay;
	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;

	DPU_DEBUG_CMDENC(cmd_enc,
		"tc vsync_clk_speed_hz %lu vtotal %u vrefresh %u\n",
		vsync_hz, mode->vtotal, drm_mode_vrefresh(mode));
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc enable %u start_pos %u rd_ptr_irq %u\n",
		tc_enable, tc_cfg.start_pos, tc_cfg.rd_ptr_irq);
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
		tc_cfg.hw_vsync_mode, tc_cfg.vsync_count,
		tc_cfg.vsync_init_val);
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc cfgheight %u thresh_start %u thresh_cont %u\n",
		tc_cfg.sync_cfg_height, tc_cfg.sync_threshold_start,
		tc_cfg.sync_threshold_continue);

	if (phys_enc->has_intf_te)
		phys_enc->hw_intf->ops.enable_tearcheck(phys_enc->hw_intf, &tc_cfg);
	else
		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, &tc_cfg);
}

static void _dpu_encoder_phys_cmd_pingpong_config(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
		DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
			phys_enc->hw_pp->idx - PINGPONG_0);
	drm_mode_debug_printmodeline(&phys_enc->cached_mode);

	_dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
	dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
}

static bool dpu_encoder_phys_cmd_needs_single_flush(
		struct dpu_encoder_phys *phys_enc)
{
	/*
	 * we do a separate flush for each CTL and let
	 * CTL_START synchronize them
	 */
	return false;
}

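/*
 * Common enable path, used both for the initial enable and for restore
 * after idle power-collapse: program the split config, CTL/INTF/tear check,
 * and add the interface to the pending flush mask so the next kickoff
 * pushes the new configuration.
 */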
static void dpu_encoder_phys_cmd_enable_helper(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);

	_dpu_encoder_phys_cmd_pingpong_config(phys_enc);

	ctl = phys_enc->hw_ctl;
	ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
}

static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid phys encoder\n");
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);

	if (phys_enc->enable_state == DPU_ENC_ENABLED) {
		DPU_ERROR("already enabled\n");
		return;
	}

	dpu_encoder_phys_cmd_enable_helper(phys_enc);
	phys_enc->enable_state = DPU_ENC_ENABLED;
}

static void _dpu_encoder_phys_cmd_connect_te(
		struct dpu_encoder_phys *phys_enc, bool enable)
{
	if (phys_enc->has_intf_te) {
		if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.connect_external_te)
			return;

		trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
		phys_enc->hw_intf->ops.connect_external_te(phys_enc->hw_intf, enable);
	} else {
		if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
			return;

		trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
		phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
	}
}

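/*
 * Before idle power-collapse, disconnect the external TE pin; the next
 * kickoff reconnects it in dpu_encoder_phys_cmd_handle_post_kickoff().
 */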
static void dpu_encoder_phys_cmd_prepare_idle_pc(
		struct dpu_encoder_phys *phys_enc)
{
	_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
}

static int dpu_encoder_phys_cmd_get_line_count(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_pingpong *hw_pp;
	struct dpu_hw_intf *hw_intf;

	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return -EINVAL;

	if (phys_enc->has_intf_te) {
		hw_intf = phys_enc->hw_intf;
		if (!hw_intf || !hw_intf->ops.get_line_count)
			return -EINVAL;
		return hw_intf->ops.get_line_count(hw_intf);
	}

	hw_pp = phys_enc->hw_pp;
	if (!hw_pp || !hw_pp->ops.get_line_count)
		return -EINVAL;
	return hw_pp->ops.get_line_count(hw_pp);
}

static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_ctl *ctl;

	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
		return;
	}

	if (phys_enc->has_intf_te) {
		DRM_DEBUG_KMS("id:%u intf:%d state:%d\n", DRMID(phys_enc->parent),
			      phys_enc->hw_intf->idx - INTF_0,
			      phys_enc->enable_state);

		if (phys_enc->hw_intf->ops.disable_tearcheck)
			phys_enc->hw_intf->ops.disable_tearcheck(phys_enc->hw_intf);
	} else {
		if (!phys_enc->hw_pp) {
			DPU_ERROR("invalid encoder\n");
			return;
		}

		DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
			      phys_enc->hw_pp->idx - PINGPONG_0,
			      phys_enc->enable_state);

		if (phys_enc->hw_pp->ops.disable_tearcheck)
			phys_enc->hw_pp->ops.disable_tearcheck(phys_enc->hw_pp);
	}

	if (phys_enc->hw_intf->ops.bind_pingpong_blk) {
		phys_enc->hw_intf->ops.bind_pingpong_blk(
				phys_enc->hw_intf,
				PINGPONG_NONE);

		ctl = phys_enc->hw_ctl;
		ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
	}

	phys_enc->enable_state = DPU_ENC_DISABLED;
}

static void dpu_encoder_phys_cmd_prepare_for_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	int ret;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(&phys_enc->pending_kickoff_cnt));

	/*
	 * Mark the kickoff request as outstanding. If there is more than one
	 * outstanding request, we have to wait for the previous one to
	 * complete.
	 */
	ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
	if (ret) {
		/* force pending_kickoff_cnt 0 to discard failed kickoff */
		atomic_set(&phys_enc->pending_kickoff_cnt, 0);
		DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
			  DRMID(phys_enc->parent), ret,
			  phys_enc->hw_pp->idx - PINGPONG_0);
	}

	dpu_encoder_phys_cmd_enable_te(phys_enc);

	DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
			phys_enc->hw_pp->idx - PINGPONG_0,
			atomic_read(&phys_enc->pending_kickoff_cnt));
}

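/*
 * Ensure the panel TE path is under explicit kickoff control: on the master
 * encoder, disable any autorefresh that may still be running so frames are
 * transferred only on kickoff.
 */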
static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc)
{
	if (!phys_enc)
		return;
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return;

	if (phys_enc->has_intf_te) {
		if (!phys_enc->hw_intf->ops.disable_autorefresh)
			return;

		phys_enc->hw_intf->ops.disable_autorefresh(
				phys_enc->hw_intf,
				DRMID(phys_enc->parent),
				phys_enc->cached_mode.vdisplay);
	} else {
		if (!phys_enc->hw_pp ||
		    !phys_enc->hw_pp->ops.disable_autorefresh)
			return;

		phys_enc->hw_pp->ops.disable_autorefresh(
				phys_enc->hw_pp,
				DRMID(phys_enc->parent),
				phys_enc->cached_mode.vdisplay);
	}
}

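/*
 * Wait for the CTL_START interrupt, i.e. for the CTL block to latch the
 * configuration of the first frame after enable.
 */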
static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_encoder_wait_info wait_info;
	int ret;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_CTL_START],
			dpu_encoder_phys_cmd_ctl_start_irq,
			&wait_info);
	if (ret == -ETIMEDOUT) {
		DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
		ret = -EINVAL;
	}

	return ret;
}

static int dpu_encoder_phys_cmd_wait_for_tx_complete(
		struct dpu_encoder_phys *phys_enc)
{
	int rc;

	rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
	if (rc) {
		DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
			  DRMID(phys_enc->parent), rc,
			  phys_enc->hw_intf->idx - INTF_0);
	}

	return rc;
}

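/*
 * Commit completion: once the CTL is already running, a commit completes on
 * PP_DONE (transfer done); for the very first commit the CTL has not been
 * started yet, so wait for CTL_START instead.
 */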
static int dpu_encoder_phys_cmd_wait_for_commit_done(
		struct dpu_encoder_phys *phys_enc)
{
	/* only required for master controller */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return 0;

	if (phys_enc->hw_ctl->ops.is_started(phys_enc->hw_ctl))
		return dpu_encoder_phys_cmd_wait_for_tx_complete(phys_enc);

	return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
}

static void dpu_encoder_phys_cmd_handle_post_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	/*
	 * re-enable external TE, either for the first time after enabling
	 * or if disabled for Autorefresh
	 */
	_dpu_encoder_phys_cmd_connect_te(phys_enc, true);
}

static void dpu_encoder_phys_cmd_trigger_start(
		struct dpu_encoder_phys *phys_enc)
{
	dpu_encoder_helper_trigger_start(phys_enc);
}

static void dpu_encoder_phys_cmd_init_ops(
		struct dpu_encoder_phys_ops *ops)
{
	ops->is_master = dpu_encoder_phys_cmd_is_master;
	ops->enable = dpu_encoder_phys_cmd_enable;
	ops->disable = dpu_encoder_phys_cmd_disable;
	ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
	ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
	ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
	ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
	ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
	ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
	ops->irq_enable = dpu_encoder_phys_cmd_irq_enable;
	ops->irq_disable = dpu_encoder_phys_cmd_irq_disable;
	ops->restore = dpu_encoder_phys_cmd_enable_helper;
	ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
	ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
	ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
}

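/**
 * dpu_encoder_phys_cmd_init - construct a new command-mode physical encoder
 * @dev: corresponding &drm_device, used for drmm allocations
 * @p: pointer to the init parameters
 *
 * Return: pointer to the new physical encoder, or an ERR_PTR() on failure.
 */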
struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
		struct dpu_enc_phys_init_params *p)
{
	struct dpu_encoder_phys *phys_enc = NULL;
	struct dpu_encoder_phys_cmd *cmd_enc = NULL;

	DPU_DEBUG("intf\n");

	cmd_enc = drmm_kzalloc(dev, sizeof(*cmd_enc), GFP_KERNEL);
	if (!cmd_enc) {
		DPU_ERROR("failed to allocate\n");
		return ERR_PTR(-ENOMEM);
	}
	phys_enc = &cmd_enc->base;

	dpu_encoder_phys_init(phys_enc, p);

	mutex_init(&phys_enc->vblank_ctl_lock);
	phys_enc->vblank_refcount = 0;

	dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
	phys_enc->intf_mode = INTF_MODE_CMD;

	cmd_enc->stream_sel = 0;

	if (!phys_enc->hw_intf) {
		DPU_ERROR_CMDENC(cmd_enc, "no INTF provided\n");
		return ERR_PTR(-EINVAL);
	}

	phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;

	/* DPU before 5.0 uses PINGPONG for TE handling */
	if (phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5)
		phys_enc->has_intf_te = true;

	if (phys_enc->has_intf_te && !phys_enc->hw_intf->ops.enable_tearcheck) {
		DPU_ERROR_CMDENC(cmd_enc, "tearcheck not supported\n");
		return ERR_PTR(-EINVAL);
	}

	atomic_set(&cmd_enc->pending_vblank_cnt, 0);
	init_waitqueue_head(&cmd_enc->pending_vblank_wq);

	DPU_DEBUG_CMDENC(cmd_enc, "created\n");

	return phys_enc;
}