// SPDX-License-Identifier: GPL-2.0
/*
 * Cedrus VPU driver
 *
 * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
 * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
 * Copyright (C) 2018 Bootlin
 *
 * Based on the vim2m driver, that is:
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */

#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/soc/sunxi/sunxi_sram.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>

#include "cedrus.h"
#include "cedrus_hw.h"
#include "cedrus_regs.h"

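/*
 * Select and enable the decoding engine: program VE_MODE with the codec
 * mode matching the source pixel format, along with the reconstruction
 * write mode, DDR bandwidth and picture-width bits the hardware expects.
 */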
int cedrus_engine_enable(struct cedrus_ctx *ctx)
{
	u32 reg = 0;

	/*
	 * FIXME: This is only valid for 32-bit DDR; it should be
	 * tested on the A13/A33.
	 */
	reg |= VE_MODE_REC_WR_MODE_2MB;
	reg |= VE_MODE_DDR_MODE_BW_128;

	switch (ctx->src_fmt.pixelformat) {
	case V4L2_PIX_FMT_MPEG2_SLICE:
		reg |= VE_MODE_DEC_MPEG;
		break;

	/* H.264 and VP8 both use the same decoding mode bit. */
	case V4L2_PIX_FMT_H264_SLICE:
	case V4L2_PIX_FMT_VP8_FRAME:
		reg |= VE_MODE_DEC_H264;
		break;

	case V4L2_PIX_FMT_HEVC_SLICE:
		reg |= VE_MODE_DEC_H265;
		break;

	default:
		return -EINVAL;
	}

	if (ctx->src_fmt.width == 4096)
		reg |= VE_MODE_PIC_WIDTH_IS_4096;
	if (ctx->src_fmt.width > 2048)
		reg |= VE_MODE_PIC_WIDTH_MORE_2048;

	cedrus_write(ctx->dev, VE_MODE, reg);

	return 0;
}

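/* Disable the decoding engine by writing the disabled mode to VE_MODE. */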
void cedrus_engine_disable(struct cedrus_dev *dev)
{
	cedrus_write(dev, VE_MODE, VE_MODE_DISABLED);
}

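/*
 * Program the output format registers for the destination pixel format:
 * untiled NV12 needs an explicit chroma buffer length and line strides,
 * while 32x32-tiled NV12 (the fallback) selects the tiled layout on the
 * primary and secondary outputs.
 */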
void cedrus_dst_format_set(struct cedrus_dev *dev,
			   struct v4l2_pix_format *fmt)
{
	unsigned int width = fmt->width;
	unsigned int height = fmt->height;
	u32 chroma_size;
	u32 reg;

	switch (fmt->pixelformat) {
	case V4L2_PIX_FMT_NV12:
		chroma_size = ALIGN(width, 16) * ALIGN(height, 16) / 2;

		reg = VE_PRIMARY_OUT_FMT_NV12;
		cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);

		reg = chroma_size / 2;
		cedrus_write(dev, VE_PRIMARY_CHROMA_BUF_LEN, reg);

		reg = VE_PRIMARY_FB_LINE_STRIDE_LUMA(ALIGN(width, 16)) |
		      VE_PRIMARY_FB_LINE_STRIDE_CHROMA(ALIGN(width, 16) / 2);
		cedrus_write(dev, VE_PRIMARY_FB_LINE_STRIDE, reg);

		break;
	case V4L2_PIX_FMT_NV12_32L32:
	default:
		reg = VE_PRIMARY_OUT_FMT_TILED_32_NV12;
		cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);

		reg = VE_SECONDARY_OUT_FMT_TILED_32_NV12;
		cedrus_write(dev, VE_CHROMA_BUF_LEN, reg);

		break;
	}
}

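/*
 * Decoding interrupt handler: cancel the watchdog, query the per-codec
 * interrupt status, disable and acknowledge the interrupt, then complete
 * the current mem2mem job with a buffer state matching the outcome.
 */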
static irqreturn_t cedrus_irq(int irq, void *data)
{
	struct cedrus_dev *dev = data;
	struct cedrus_ctx *ctx;
	enum vb2_buffer_state state;
	enum cedrus_irq_status status;

	/*
	 * If cancel_delayed_work() returns false, the watchdog has
	 * already run and finished the job.
	 */
	if (!cancel_delayed_work(&dev->watchdog_work))
		return IRQ_HANDLED;

	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	if (!ctx) {
		v4l2_err(&dev->v4l2_dev,
			 "Instance released before the end of transaction\n");
		return IRQ_NONE;
	}

	status = ctx->current_codec->irq_status(ctx);
	if (status == CEDRUS_IRQ_NONE)
		return IRQ_NONE;

	ctx->current_codec->irq_disable(ctx);
	ctx->current_codec->irq_clear(ctx);

	if (status == CEDRUS_IRQ_ERROR)
		state = VB2_BUF_STATE_ERROR;
	else
		state = VB2_BUF_STATE_DONE;

	v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
					 state);

	return IRQ_HANDLED;
}

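/*
 * Watchdog worker: if it gets to run, the decoding interrupt did not
 * cancel it in time, so assume the hardware is stuck, reset it and
 * complete the current job with an error state.
 */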
void cedrus_watchdog(struct work_struct *work)
{
	struct cedrus_dev *dev;
	struct cedrus_ctx *ctx;

	dev = container_of(to_delayed_work(work),
			   struct cedrus_dev, watchdog_work);

	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	if (!ctx)
		return;

	v4l2_err(&dev->v4l2_dev, "frame processing timed out!\n");
	reset_control_reset(dev->rstc);
	v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
					 VB2_BUF_STATE_ERROR);
}

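/* Power the VPU down: gate all of its clocks and assert its reset line. */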
int cedrus_hw_suspend(struct device *device)
{
	struct cedrus_dev *dev = dev_get_drvdata(device);

	clk_disable_unprepare(dev->ram_clk);
	clk_disable_unprepare(dev->mod_clk);
	clk_disable_unprepare(dev->ahb_clk);

	reset_control_assert(dev->rstc);

	return 0;
}

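/*
 * Power the VPU up: deassert the reset line, then enable the AHB, module
 * and RAM clocks in turn, unwinding the steps already done if one fails.
 */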
int cedrus_hw_resume(struct device *device)
{
	struct cedrus_dev *dev = dev_get_drvdata(device);
	int ret;

	ret = reset_control_reset(dev->rstc);
	if (ret) {
		dev_err(dev->dev, "Failed to apply reset\n");

		return ret;
	}

	ret = clk_prepare_enable(dev->ahb_clk);
	if (ret) {
		dev_err(dev->dev, "Failed to enable AHB clock\n");

		goto err_rst;
	}

	ret = clk_prepare_enable(dev->mod_clk);
	if (ret) {
		dev_err(dev->dev, "Failed to enable MOD clock\n");

		goto err_ahb_clk;
	}

	ret = clk_prepare_enable(dev->ram_clk);
	if (ret) {
		dev_err(dev->dev, "Failed to enable RAM clock\n");

		goto err_mod_clk;
	}

	return 0;

err_mod_clk:
	clk_disable_unprepare(dev->mod_clk);
err_ahb_clk:
	clk_disable_unprepare(dev->ahb_clk);
err_rst:
	reset_control_assert(dev->rstc);

	return ret;
}

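/*
 * One-time hardware setup: fetch the SoC variant data, request the
 * decoding interrupt, claim reserved memory and SRAM, look up clocks,
 * reset line and registers, set the module clock rate and enable
 * runtime PM.
 */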
int cedrus_hw_probe(struct cedrus_dev *dev)
{
	const struct cedrus_variant *variant;
	int irq_dec;
	int ret;

	variant = of_device_get_match_data(dev->dev);
	if (!variant)
		return -EINVAL;

	dev->capabilities = variant->capabilities;

	irq_dec = platform_get_irq(dev->pdev, 0);
	if (irq_dec <= 0)
		return irq_dec;

	ret = devm_request_irq(dev->dev, irq_dec, cedrus_irq,
			       0, dev_name(dev->dev), dev);
	if (ret) {
		dev_err(dev->dev, "Failed to request IRQ\n");

		return ret;
	}

	ret = of_reserved_mem_device_init(dev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(dev->dev, "Failed to reserve memory\n");

		return ret;
	}

	ret = sunxi_sram_claim(dev->dev);
	if (ret) {
		dev_err(dev->dev, "Failed to claim SRAM\n");

		goto err_mem;
	}

	dev->ahb_clk = devm_clk_get(dev->dev, "ahb");
	if (IS_ERR(dev->ahb_clk)) {
		dev_err(dev->dev, "Failed to get AHB clock\n");

		ret = PTR_ERR(dev->ahb_clk);
		goto err_sram;
	}

	dev->mod_clk = devm_clk_get(dev->dev, "mod");
	if (IS_ERR(dev->mod_clk)) {
		dev_err(dev->dev, "Failed to get MOD clock\n");

		ret = PTR_ERR(dev->mod_clk);
		goto err_sram;
	}

	dev->ram_clk = devm_clk_get(dev->dev, "ram");
	if (IS_ERR(dev->ram_clk)) {
		dev_err(dev->dev, "Failed to get RAM clock\n");

		ret = PTR_ERR(dev->ram_clk);
		goto err_sram;
	}

	dev->rstc = devm_reset_control_get(dev->dev, NULL);
	if (IS_ERR(dev->rstc)) {
		dev_err(dev->dev, "Failed to get reset control\n");

		ret = PTR_ERR(dev->rstc);
		goto err_sram;
	}

	dev->base = devm_platform_ioremap_resource(dev->pdev, 0);
	if (IS_ERR(dev->base)) {
		dev_err(dev->dev, "Failed to map registers\n");

		ret = PTR_ERR(dev->base);
		goto err_sram;
	}

	ret = clk_set_rate(dev->mod_clk, variant->mod_rate);
	if (ret) {
		dev_err(dev->dev, "Failed to set clock rate\n");

		goto err_sram;
	}

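	/*
	 * When runtime PM is not available (e.g. CONFIG_PM is disabled),
	 * pm_runtime_enabled() returns false, so bring the hardware up
	 * manually here instead of relying on the runtime PM callbacks.
	 */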
	pm_runtime_enable(dev->dev);
	if (!pm_runtime_enabled(dev->dev)) {
		ret = cedrus_hw_resume(dev->dev);
		if (ret)
			goto err_pm;
	}

	return 0;

err_pm:
	pm_runtime_disable(dev->dev);
err_sram:
	sunxi_sram_release(dev->dev);
err_mem:
	of_reserved_mem_device_release(dev->dev);

	return ret;
}

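/*
 * Undo cedrus_hw_probe(): disable runtime PM, power the hardware down if
 * it is still up, then release the SRAM and reserved memory regions.
 */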
void cedrus_hw_remove(struct cedrus_dev *dev)
{
	pm_runtime_disable(dev->dev);
	if (!pm_runtime_status_suspended(dev->dev))
		cedrus_hw_suspend(dev->dev);

	sunxi_sram_release(dev->dev);

	of_reserved_mem_device_release(dev->dev);
}