// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 MediaTek Inc.
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_disp_drv.h"

#define DISP_REG_MERGE_CTRL		0x000
#define MERGE_EN				1
#define DISP_REG_MERGE_CFG_0		0x010
#define DISP_REG_MERGE_CFG_1		0x014
#define DISP_REG_MERGE_CFG_4		0x020
#define DISP_REG_MERGE_CFG_10		0x038
/* no swap */
#define SWAP_MODE				0
#define FLD_SWAP_MODE				GENMASK(4, 0)
#define DISP_REG_MERGE_CFG_12		0x040
#define CFG_10_10_1PI_2PO_BUF_MODE		6
#define CFG_10_10_2PI_2PO_BUF_MODE		8
#define CFG_11_10_1PI_2PO_MERGE			18
#define FLD_CFG_MERGE_MODE			GENMASK(4, 0)
#define DISP_REG_MERGE_CFG_24		0x070
#define DISP_REG_MERGE_CFG_25		0x074
#define DISP_REG_MERGE_CFG_26		0x078
#define DISP_REG_MERGE_CFG_27		0x07c
#define DISP_REG_MERGE_CFG_36		0x0a0
#define ULTRA_EN				BIT(0)
#define PREULTRA_EN				BIT(4)
#define DISP_REG_MERGE_CFG_37		0x0a4
/* 0: Off, 1: SRAM0, 2: SRAM1, 3: SRAM0 + SRAM1 */
#define BUFFER_MODE				3
#define FLD_BUFFER_MODE				GENMASK(1, 0)
/*
 * For the ultra and preultra settings, the 6 us ~ 9 us range is an empirical
 * value, and the maximum frequency of the mmsys clock is 594 MHz.
 */
#define DISP_REG_MERGE_CFG_40		0x0b0
/* 6 us, 594M pixel/sec */
#define ULTRA_TH_LOW				(6 * 594)
/* 8 us, 594M pixel/sec */
#define ULTRA_TH_HIGH				(8 * 594)
#define FLD_ULTRA_TH_LOW			GENMASK(15, 0)
#define FLD_ULTRA_TH_HIGH			GENMASK(31, 16)
#define DISP_REG_MERGE_CFG_41		0x0b4
/* 8 us, 594M pixel/sec */
#define PREULTRA_TH_LOW				(8 * 594)
/* 9 us, 594M pixel/sec */
#define PREULTRA_TH_HIGH			(9 * 594)
#define FLD_PREULTRA_TH_LOW			GENMASK(15, 0)
#define FLD_PREULTRA_TH_HIGH			GENMASK(31, 16)
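/*
 * In pixel units: threshold = time (us) * pixel rate (Mpixel/s), e.g.
 * ULTRA_TH_LOW = 6 * 594 = 3564 pixels and PREULTRA_TH_HIGH = 9 * 594 = 5346
 * pixels, both of which fit the 16-bit low/high threshold fields above.
 */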

#define DISP_REG_MERGE_MUTE_0		0xf00

struct mtk_disp_merge {
	void __iomem			*regs;		/* MMIO base of the MERGE block */
	struct clk			*clk;		/* main merge clock */
	struct clk			*async_clk;	/* optional "merge_async" clock */
	struct cmdq_client_reg		cmdq_reg;	/* CMDQ (GCE) client register info */
	bool				fifo_en;	/* "mediatek,merge-fifo-en" */
	bool				mute_support;	/* "mediatek,merge-mute" */
	struct reset_control		*reset_ctl;	/* optional reset, used with async_clk */
};

void mtk_merge_start(struct device *dev)
{
	mtk_merge_start_cmdq(dev, NULL);
}

void mtk_merge_stop(struct device *dev)
{
	mtk_merge_stop_cmdq(dev, NULL);
}

void mtk_merge_start_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt)
{
	struct mtk_disp_merge *priv = dev_get_drvdata(dev);

	if (priv->mute_support)
		mtk_ddp_write(cmdq_pkt, 0x0, &priv->cmdq_reg, priv->regs,
			      DISP_REG_MERGE_MUTE_0);

	mtk_ddp_write(cmdq_pkt, 1, &priv->cmdq_reg, priv->regs,
		      DISP_REG_MERGE_CTRL);
}

void mtk_merge_stop_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt)
{
	struct mtk_disp_merge *priv = dev_get_drvdata(dev);

	if (priv->mute_support)
		mtk_ddp_write(cmdq_pkt, 0x1, &priv->cmdq_reg, priv->regs,
			      DISP_REG_MERGE_MUTE_0);

	mtk_ddp_write(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs,
		      DISP_REG_MERGE_CTRL);

	if (!cmdq_pkt && priv->async_clk)
		reset_control_reset(priv->reset_ctl);
}

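/*
 * Enable the ultra and preultra signals, select the SRAM0 + SRAM1 buffer
 * mode and program the ultra/preultra low/high thresholds defined above.
 */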
static void mtk_merge_fifo_setting(struct mtk_disp_merge *priv,
				   struct cmdq_pkt *cmdq_pkt)
{
	mtk_ddp_write(cmdq_pkt, ULTRA_EN | PREULTRA_EN,
		      &priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_36);

	mtk_ddp_write_mask(cmdq_pkt, BUFFER_MODE,
			   &priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_37,
			   FLD_BUFFER_MODE);

	mtk_ddp_write_mask(cmdq_pkt, ULTRA_TH_LOW | ULTRA_TH_HIGH << 16,
			   &priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_40,
			   FLD_ULTRA_TH_LOW | FLD_ULTRA_TH_HIGH);

	mtk_ddp_write_mask(cmdq_pkt, PREULTRA_TH_LOW | PREULTRA_TH_HIGH << 16,
			   &priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_41,
			   FLD_PREULTRA_TH_LOW | FLD_PREULTRA_TH_HIGH);
}

void mtk_merge_config(struct device *dev, unsigned int w,
		      unsigned int h, unsigned int vrefresh,
		      unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
	mtk_merge_advance_config(dev, w, 0, h, vrefresh, bpc, cmdq_pkt);
}

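/*
 * Configure the merge for a single input (l_w only, buffer mode) or for two
 * inputs (l_w and r_w, LR merge mode) of height h. When the FIFO is enabled,
 * CFG_10_10_2PI_2PO_BUF_MODE is used instead of the 1PI buffer mode.
 */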
void mtk_merge_advance_config(struct device *dev, unsigned int l_w, unsigned int r_w,
			      unsigned int h, unsigned int vrefresh, unsigned int bpc,
			      struct cmdq_pkt *cmdq_pkt)
{
	struct mtk_disp_merge *priv = dev_get_drvdata(dev);
	unsigned int mode = CFG_10_10_1PI_2PO_BUF_MODE;

	if (!h || !l_w) {
		dev_err(dev, "%s: input width(%d) or height(%d) is invalid\n", __func__, l_w, h);
		return;
	}

	if (priv->fifo_en) {
		mtk_merge_fifo_setting(priv, cmdq_pkt);
		mode = CFG_10_10_2PI_2PO_BUF_MODE;
	}

	if (r_w)
		mode = CFG_11_10_1PI_2PO_MERGE;

	mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs,
		      DISP_REG_MERGE_CFG_0);
	mtk_ddp_write(cmdq_pkt, h << 16 | r_w, &priv->cmdq_reg, priv->regs,
		      DISP_REG_MERGE_CFG_1);
	mtk_ddp_write(cmdq_pkt, h << 16 | (l_w + r_w), &priv->cmdq_reg, priv->regs,
		      DISP_REG_MERGE_CFG_4);
	/*
	 * DISP_REG_MERGE_CFG_24 is the merge SRAM0 w/h and
	 * DISP_REG_MERGE_CFG_25 is the merge SRAM1 w/h.
	 * If r_w > 0, the merge is in merge mode (input0 and input1 are merged
	 * together): input0 goes to SRAM0 and input1 goes to SRAM1.
	 * If r_w = 0, the merge is in buffer mode: the input goes through SRAM0
	 * and then SRAM1, and both SRAMs are set to the same size.
	 */
	mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs,
		      DISP_REG_MERGE_CFG_24);
	if (r_w)
		mtk_ddp_write(cmdq_pkt, h << 16 | r_w, &priv->cmdq_reg, priv->regs,
			      DISP_REG_MERGE_CFG_25);
	else
		mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs,
			      DISP_REG_MERGE_CFG_25);

	/*
	 * DISP_REG_MERGE_CFG_26 and DISP_REG_MERGE_CFG_27 are only used in LR
	 * merge; they only take effect when the merge is set to merge mode.
	 */
	mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs,
		      DISP_REG_MERGE_CFG_26);
	mtk_ddp_write(cmdq_pkt, h << 16 | r_w, &priv->cmdq_reg, priv->regs,
		      DISP_REG_MERGE_CFG_27);

	mtk_ddp_write_mask(cmdq_pkt, SWAP_MODE, &priv->cmdq_reg, priv->regs,
			   DISP_REG_MERGE_CFG_10, FLD_SWAP_MODE);
	mtk_ddp_write_mask(cmdq_pkt, mode, &priv->cmdq_reg, priv->regs,
			   DISP_REG_MERGE_CFG_12, FLD_CFG_MERGE_MODE);
}

int mtk_merge_clk_enable(struct device *dev)
{
	int ret = 0;
	struct mtk_disp_merge *priv = dev_get_drvdata(dev);

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(dev, "merge clk prepare enable failed\n");
		return ret;
	}

	ret = clk_prepare_enable(priv->async_clk);
	if (ret) {
		/* should clean up the state of priv->clk */
		clk_disable_unprepare(priv->clk);

		dev_err(dev, "async clk prepare enable failed\n");
		return ret;
	}

	return ret;
}

void mtk_merge_clk_disable(struct device *dev)
{
	struct mtk_disp_merge *priv = dev_get_drvdata(dev);

	clk_disable_unprepare(priv->async_clk);
	clk_disable_unprepare(priv->clk);
}

enum drm_mode_status mtk_merge_mode_valid(struct device *dev,
					  const struct drm_display_mode *mode)
{
	struct mtk_disp_merge *priv = dev_get_drvdata(dev);
	unsigned long rate;

	rate = clk_get_rate(priv->clk);

	/* Convert to kHz and round to the nearest integer */
	rate = (rate + 500) / 1000;

	if (rate && mode->clock > rate) {
		dev_dbg(dev, "invalid clock: %d (>%lu)\n", mode->clock, rate);
		return MODE_CLOCK_HIGH;
	}

	/*
	 * Measure the bandwidth requirement of hardware prefetch (per frame)
	 *
	 * let N = prefetch buffer size in lines
	 *         (ex. N=3, then prefetch buffer size = 3 lines)
	 *
	 * prefetch size = htotal * N (pixels)
	 * time per line = 1 / fps / vtotal (seconds)
	 * duration      = vbp * time per line
	 *               = vbp / fps / vtotal
	 *
	 * data rate = prefetch size / duration
	 *           = htotal * N / (vbp / fps / vtotal)
	 *           = htotal * vtotal * fps * N / vbp
	 *           = clk * N / vbp (pixels per second)
	 *
	 * Say 4K60 (CEA-861) is the maximum mode supported by the SoC:
	 * data rate = 594000K * N / 72 = 8250 (reference value)
	 * (K and N are dropped since they are common to every mode)
	 *
	 * For 2560x1440@144 (clk=583600K, vbp=17):
	 * data rate = 583600 / 17 ~= 34329 > 8250 (NG)
	 *
	 * For 2560x1440@120 (clk=497760K, vbp=77):
	 * data rate = 497760 / 77 ~= 6464 < 8250 (OK)
	 *
	 * A non-standard 4K60 timing (clk=521280K, vbp=54):
	 * data rate = 521280 / 54 ~= 9653 > 8250 (NG)
	 *
	 * Bandwidth requirement of hardware prefetch increases significantly
	 * when the VBP decreases (more than 4x in this example).
	 *
	 * The proposed formula is only one way to estimate whether our SoC
	 * supports the mode setting. The basic idea behind it is just to check
	 * if the data rate requirement is too high (directly proportional to
	 * pixel clock, inversely proportional to vbp). Please adjust the
	 * function if it doesn't fit your situation in the future.
	 */
	rate = mode->clock / (mode->vtotal - mode->vsync_end);

	if (rate > 8250) {
		dev_dbg(dev, "invalid rate: %lu (>8250): " DRM_MODE_FMT "\n",
			rate, DRM_MODE_ARG(mode));
		return MODE_BAD;
	}

	return MODE_OK;
}

static int mtk_disp_merge_bind(struct device *dev, struct device *master,
			       void *data)
{
	return 0;
}

static void mtk_disp_merge_unbind(struct device *dev, struct device *master,
				  void *data)
{
}

static const struct component_ops mtk_disp_merge_component_ops = {
	.bind	= mtk_disp_merge_bind,
	.unbind = mtk_disp_merge_unbind,
};

static int mtk_disp_merge_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct mtk_disp_merge *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->regs)) {
		dev_err(dev, "failed to ioremap merge\n");
		return PTR_ERR(priv->regs);
	}

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get merge clk\n");
		return PTR_ERR(priv->clk);
	}

	priv->async_clk = devm_clk_get_optional(dev, "merge_async");
	if (IS_ERR(priv->async_clk)) {
		dev_err(dev, "failed to get merge async clock\n");
		return PTR_ERR(priv->async_clk);
	}

	if (priv->async_clk) {
		priv->reset_ctl = devm_reset_control_get_optional_exclusive(dev, NULL);
		if (IS_ERR(priv->reset_ctl))
			return PTR_ERR(priv->reset_ctl);
	}

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
	if (ret)
		dev_dbg(dev, "failed to get mediatek,gce-client-reg\n");
#endif

	priv->fifo_en = of_property_read_bool(dev->of_node,
					      "mediatek,merge-fifo-en");

	priv->mute_support = of_property_read_bool(dev->of_node,
						   "mediatek,merge-mute");
	platform_set_drvdata(pdev, priv);

	ret = component_add(dev, &mtk_disp_merge_component_ops);
	if (ret != 0)
		dev_err(dev, "Failed to add component: %d\n", ret);

	return ret;
}

static void mtk_disp_merge_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &mtk_disp_merge_component_ops);
}

static const struct of_device_id mtk_disp_merge_driver_dt_match[] = {
	{ .compatible = "mediatek,mt8195-disp-merge", },
	{},
};

MODULE_DEVICE_TABLE(of, mtk_disp_merge_driver_dt_match);

struct platform_driver mtk_disp_merge_driver = {
	.probe = mtk_disp_merge_probe,
	.remove_new = mtk_disp_merge_remove,
	.driver = {
		.name = "mediatek-disp-merge",
		.owner = THIS_MODULE,
		.of_match_table = mtk_disp_merge_driver_dt_match,
	},
};