1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright 2019 NXP.
4 */
5
6#include <linux/delay.h>
7#include <linux/dma-mapping.h>
8#include <linux/interrupt.h>
9#include <linux/platform_device.h>
10#include <linux/slab.h>
11
12#include "dcss-dev.h"
13
14#define DCSS_CTXLD_CONTROL_STATUS	0x0
15#define   CTXLD_ENABLE			BIT(0)
16#define   ARB_SEL			BIT(1)
17#define   RD_ERR_EN			BIT(2)
18#define   DB_COMP_EN			BIT(3)
19#define   SB_HP_COMP_EN			BIT(4)
20#define   SB_LP_COMP_EN			BIT(5)
21#define   DB_PEND_SB_REC_EN		BIT(6)
22#define   SB_PEND_DISP_ACTIVE_EN	BIT(7)
23#define   AHB_ERR_EN			BIT(8)
24#define   RD_ERR			BIT(16)
25#define   DB_COMP			BIT(17)
26#define   SB_HP_COMP			BIT(18)
27#define   SB_LP_COMP			BIT(19)
28#define   DB_PEND_SB_REC		BIT(20)
29#define   SB_PEND_DISP_ACTIVE		BIT(21)
30#define   AHB_ERR			BIT(22)
31#define DCSS_CTXLD_DB_BASE_ADDR		0x10
32#define DCSS_CTXLD_DB_COUNT		0x14
33#define DCSS_CTXLD_SB_BASE_ADDR		0x18
34#define DCSS_CTXLD_SB_COUNT		0x1C
35#define   SB_HP_COUNT_POS		0
36#define   SB_HP_COUNT_MASK		0xffff
37#define   SB_LP_COUNT_POS		16
38#define   SB_LP_COUNT_MASK		0xffff0000
39#define DCSS_AHB_ERR_ADDR		0x20
40
41#define CTXLD_IRQ_COMPLETION		(DB_COMP | SB_HP_COMP | SB_LP_COMP)
42#define CTXLD_IRQ_ERROR			(RD_ERR | DB_PEND_SB_REC | AHB_ERR)
43
44/* The following sizes are in context loader entries, 8 bytes each. */
45#define CTXLD_DB_CTX_ENTRIES		1024	/* max 65536 */
46#define CTXLD_SB_LP_CTX_ENTRIES		10240	/* max 65536 */
47#define CTXLD_SB_HP_CTX_ENTRIES		20000	/* max 65536 */
48#define CTXLD_SB_CTX_ENTRIES		(CTXLD_SB_LP_CTX_ENTRIES + \
49					 CTXLD_SB_HP_CTX_ENTRIES)
50
51/* Sizes, in entries, of the DB, SB_HP and SB_LP context regions. */
52static u16 dcss_ctxld_ctx_size[3] = {
53	CTXLD_DB_CTX_ENTRIES,
54	CTXLD_SB_HP_CTX_ENTRIES,
55	CTXLD_SB_LP_CTX_ENTRIES
56};
57
58/* this represents an entry in the context loader map */
/*
 * One entry in the context loader map. The CTXLD hardware consumes these
 * (val, ofs) pairs from DMA memory — presumably writing 'val' to the DCSS
 * register at offset 'ofs' (see dcss_ctxld_write_irqsafe()); layout must
 * not be changed.
 */
struct dcss_ctxld_item {
	u32 val;	/* value to be written */
	u32 ofs;	/* register offset to write it to */
};
63
64#define CTX_ITEM_SIZE			sizeof(struct dcss_ctxld_item)
65
/* Driver state for the DCSS context loader (CTXLD) block. */
struct dcss_ctxld {
	struct device *dev;
	void __iomem *ctxld_reg;	/* CTXLD MMIO register base */
	int irq;
	bool irq_en;			/* tracks enable_irq()/disable_irq() balance */

	/* Double-buffered context areas; second index selects the buffer. */
	struct dcss_ctxld_item *db[2];		/* double-buffer (DB) context */
	struct dcss_ctxld_item *sb_hp[2];	/* SB high-prio; start of the SB allocation */
	struct dcss_ctxld_item *sb_lp[2];	/* SB low-prio; placed right after SB_HP */

	dma_addr_t db_paddr[2];
	dma_addr_t sb_paddr[2];	/* one allocation covering both SB_HP and SB_LP */

	u16 ctx_size[2][3]; /* holds the sizes of DB, SB_HP and SB_LP ctx */
	u8 current_ctx;	/* index of the context currently being built */

	bool in_use;	/* a context load is in flight; cleared in the IRQ handler */
	bool armed;	/* enable requested; dcss_ctxld_kick() will start the load */

	spinlock_t lock; /* protects concurrent access to private data */
};
87
/*
 * CTXLD interrupt handler: detects completion of a context load and
 * reports error conditions, then clears the handled status bits.
 */
static irqreturn_t dcss_ctxld_irq_handler(int irq, void *data)
{
	struct dcss_ctxld *ctxld = data;
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);
	u32 irq_status;

	irq_status = dcss_readl(ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	/*
	 * A load is complete only when a completion bit is set AND the
	 * loader has deasserted CTXLD_ENABLE.
	 */
	if (irq_status & CTXLD_IRQ_COMPLETION &&
	    !(irq_status & CTXLD_ENABLE) && ctxld->in_use) {
		ctxld->in_use = false;

		/* let the core know the hardware is idle (used on disable) */
		if (dcss && dcss->disable_callback)
			dcss->disable_callback(dcss);
	} else if (irq_status & CTXLD_IRQ_ERROR) {
		/*
		 * Except for throwing an error message and clearing the status
		 * register, there's not much we can do here.
		 */
		dev_err(ctxld->dev, "ctxld: error encountered: %08x\n",
			irq_status);
		/* report the sizes of the context that was being loaded */
		dev_err(ctxld->dev, "ctxld: db=%d, sb_hp=%d, sb_lp=%d\n",
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_DB],
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_HP],
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_LP]);
	}

	/* acknowledge only the bits we looked at */
	dcss_clr(irq_status & (CTXLD_IRQ_ERROR | CTXLD_IRQ_COMPLETION),
		 ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	return IRQ_HANDLED;
}
120
121static int dcss_ctxld_irq_config(struct dcss_ctxld *ctxld,
122				 struct platform_device *pdev)
123{
124	int ret;
125
126	ctxld->irq = platform_get_irq_byname(pdev, "ctxld");
127	if (ctxld->irq < 0)
128		return ctxld->irq;
129
130	ret = request_irq(ctxld->irq, dcss_ctxld_irq_handler,
131			  0, "dcss_ctxld", ctxld);
132	if (ret) {
133		dev_err(ctxld->dev, "ctxld: irq request failed.\n");
134		return ret;
135	}
136
137	ctxld->irq_en = true;
138
139	return 0;
140}
141
142static void dcss_ctxld_hw_cfg(struct dcss_ctxld *ctxld)
143{
144	dcss_writel(RD_ERR_EN | SB_HP_COMP_EN |
145		    DB_PEND_SB_REC_EN | AHB_ERR_EN | RD_ERR | AHB_ERR,
146		    ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
147}
148
149static void dcss_ctxld_free_ctx(struct dcss_ctxld *ctxld)
150{
151	struct dcss_ctxld_item *ctx;
152	int i;
153
154	for (i = 0; i < 2; i++) {
155		if (ctxld->db[i]) {
156			dma_free_coherent(ctxld->dev,
157					  CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
158					  ctxld->db[i], ctxld->db_paddr[i]);
159			ctxld->db[i] = NULL;
160			ctxld->db_paddr[i] = 0;
161		}
162
163		if (ctxld->sb_hp[i]) {
164			dma_free_coherent(ctxld->dev,
165					  CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
166					  ctxld->sb_hp[i], ctxld->sb_paddr[i]);
167			ctxld->sb_hp[i] = NULL;
168			ctxld->sb_paddr[i] = 0;
169		}
170	}
171}
172
173static int dcss_ctxld_alloc_ctx(struct dcss_ctxld *ctxld)
174{
175	struct dcss_ctxld_item *ctx;
176	int i;
177
178	for (i = 0; i < 2; i++) {
179		ctx = dma_alloc_coherent(ctxld->dev,
180					 CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
181					 &ctxld->db_paddr[i], GFP_KERNEL);
182		if (!ctx)
183			return -ENOMEM;
184
185		ctxld->db[i] = ctx;
186
187		ctx = dma_alloc_coherent(ctxld->dev,
188					 CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
189					 &ctxld->sb_paddr[i], GFP_KERNEL);
190		if (!ctx)
191			return -ENOMEM;
192
193		ctxld->sb_hp[i] = ctx;
194		ctxld->sb_lp[i] = ctx + CTXLD_SB_HP_CTX_ENTRIES;
195	}
196
197	return 0;
198}
199
200int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
201{
202	struct dcss_ctxld *ctxld;
203	int ret;
204
205	ctxld = devm_kzalloc(dcss->dev, sizeof(*ctxld), GFP_KERNEL);
206	if (!ctxld)
207		return -ENOMEM;
208
209	dcss->ctxld = ctxld;
210	ctxld->dev = dcss->dev;
211
212	spin_lock_init(&ctxld->lock);
213
214	ret = dcss_ctxld_alloc_ctx(ctxld);
215	if (ret) {
216		dev_err(dcss->dev, "ctxld: cannot allocate context memory.\n");
217		goto err;
218	}
219
220	ctxld->ctxld_reg = devm_ioremap(dcss->dev, ctxld_base, SZ_4K);
221	if (!ctxld->ctxld_reg) {
222		dev_err(dcss->dev, "ctxld: unable to remap ctxld base\n");
223		ret = -ENOMEM;
224		goto err;
225	}
226
227	ret = dcss_ctxld_irq_config(ctxld, to_platform_device(dcss->dev));
228	if (ret)
229		goto err;
230
231	dcss_ctxld_hw_cfg(ctxld);
232
233	return 0;
234
235err:
236	dcss_ctxld_free_ctx(ctxld);
237
238	return ret;
239}
240
/*
 * Tear down the context loader. The IRQ is freed first so the handler
 * can no longer touch the context buffers being released.
 */
void dcss_ctxld_exit(struct dcss_ctxld *ctxld)
{
	free_irq(ctxld->irq, ctxld);

	dcss_ctxld_free_ctx(ctxld);
}
247
/*
 * Program the CTXLD with the current context's buffers and start the
 * load, then flip current_ctx so subsequent writes build the alternate
 * context. Caller must hold ctxld->lock.
 */
static int dcss_ctxld_enable_locked(struct dcss_ctxld *ctxld)
{
	int curr_ctx = ctxld->current_ctx;
	u32 db_base, sb_base, sb_count;
	u32 sb_hp_cnt, sb_lp_cnt, db_cnt;
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);

	if (!dcss)
		return 0;

	/* flush pending DPR/scaler control settings before kicking the load */
	dcss_dpr_write_sysctrl(dcss->dpr);

	dcss_scaler_write_sclctrl(dcss->scaler);

	sb_hp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_HP];
	sb_lp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_LP];
	db_cnt = ctxld->ctx_size[curr_ctx][CTX_DB];

	/* make sure SB_LP context area comes after SB_HP */
	if (sb_lp_cnt &&
	    ctxld->sb_lp[curr_ctx] != ctxld->sb_hp[curr_ctx] + sb_hp_cnt) {
		struct dcss_ctxld_item *sb_lp_adjusted;

		sb_lp_adjusted = ctxld->sb_hp[curr_ctx] + sb_hp_cnt;

		memcpy(sb_lp_adjusted, ctxld->sb_lp[curr_ctx],
		       sb_lp_cnt * CTX_ITEM_SIZE);
	}

	/* a zero base disables the DB channel when there is nothing to load */
	db_base = db_cnt ? ctxld->db_paddr[curr_ctx] : 0;

	dcss_writel(db_base, ctxld->ctxld_reg + DCSS_CTXLD_DB_BASE_ADDR);
	dcss_writel(db_cnt, ctxld->ctxld_reg + DCSS_CTXLD_DB_COUNT);

	/* with no HP entries, LP entries are loaded through the HP channel */
	if (sb_hp_cnt)
		sb_count = ((sb_hp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK) |
			   ((sb_lp_cnt << SB_LP_COUNT_POS) & SB_LP_COUNT_MASK);
	else
		sb_count = (sb_lp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK;

	sb_base = sb_count ? ctxld->sb_paddr[curr_ctx] : 0;

	dcss_writel(sb_base, ctxld->ctxld_reg + DCSS_CTXLD_SB_BASE_ADDR);
	dcss_writel(sb_count, ctxld->ctxld_reg + DCSS_CTXLD_SB_COUNT);

	/* enable the context loader */
	dcss_set(CTXLD_ENABLE, ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	/* cleared by the IRQ handler once the load completes */
	ctxld->in_use = true;

	/*
	 * Toggle the current context to the alternate one so that any updates
	 * in the modules' settings take place there.
	 */
	ctxld->current_ctx ^= 1;

	ctxld->ctx_size[ctxld->current_ctx][CTX_DB] = 0;
	ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] = 0;
	ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] = 0;

	return 0;
}
310
/*
 * Arm the context loader: the actual load is deferred until
 * dcss_ctxld_kick() finds the hardware idle.
 */
int dcss_ctxld_enable(struct dcss_ctxld *ctxld)
{
	spin_lock_irq(&ctxld->lock);
	ctxld->armed = true;
	spin_unlock_irq(&ctxld->lock);

	return 0;
}
319
320void dcss_ctxld_kick(struct dcss_ctxld *ctxld)
321{
322	unsigned long flags;
323
324	spin_lock_irqsave(&ctxld->lock, flags);
325	if (ctxld->armed && !ctxld->in_use) {
326		ctxld->armed = false;
327		dcss_ctxld_enable_locked(ctxld);
328	}
329	spin_unlock_irqrestore(&ctxld->lock, flags);
330}
331
332void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val,
333			      u32 reg_ofs)
334{
335	int curr_ctx = ctxld->current_ctx;
336	struct dcss_ctxld_item *ctx[] = {
337		[CTX_DB] = ctxld->db[curr_ctx],
338		[CTX_SB_HP] = ctxld->sb_hp[curr_ctx],
339		[CTX_SB_LP] = ctxld->sb_lp[curr_ctx]
340	};
341	int item_idx = ctxld->ctx_size[curr_ctx][ctx_id];
342
343	if (item_idx + 1 > dcss_ctxld_ctx_size[ctx_id]) {
344		WARN_ON(1);
345		return;
346	}
347
348	ctx[ctx_id][item_idx].val = val;
349	ctx[ctx_id][item_idx].ofs = reg_ofs;
350	ctxld->ctx_size[curr_ctx][ctx_id] += 1;
351}
352
/* Locked variant of dcss_ctxld_write_irqsafe() for process context. */
void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id,
		      u32 val, u32 reg_ofs)
{
	spin_lock_irq(&ctxld->lock);
	dcss_ctxld_write_irqsafe(ctxld, ctx_id, val, reg_ofs);
	spin_unlock_irq(&ctxld->lock);
}
360
361bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld)
362{
363	return ctxld->ctx_size[ctxld->current_ctx][CTX_DB] == 0 &&
364		ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] == 0 &&
365		ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] == 0;
366}
367
/*
 * Restore CTXLD state after suspend: reprogram the control register
 * (its contents are lost across power-down) and re-enable the IRQ that
 * dcss_ctxld_suspend() disabled.
 */
int dcss_ctxld_resume(struct dcss_ctxld *ctxld)
{
	dcss_ctxld_hw_cfg(ctxld);

	if (!ctxld->irq_en) {
		enable_irq(ctxld->irq);
		ctxld->irq_en = true;
	}

	return 0;
}
379
380int dcss_ctxld_suspend(struct dcss_ctxld *ctxld)
381{
382	int ret = 0;
383	unsigned long timeout = jiffies + msecs_to_jiffies(500);
384
385	if (!dcss_ctxld_is_flushed(ctxld)) {
386		dcss_ctxld_kick(ctxld);
387
388		while (!time_after(jiffies, timeout) && ctxld->in_use)
389			msleep(20);
390
391		if (time_after(jiffies, timeout))
392			return -ETIMEDOUT;
393	}
394
395	spin_lock_irq(&ctxld->lock);
396
397	if (ctxld->irq_en) {
398		disable_irq_nosync(ctxld->irq);
399		ctxld->irq_en = false;
400	}
401
402	/* reset context region and sizes */
403	ctxld->current_ctx = 0;
404	ctxld->ctx_size[0][CTX_DB] = 0;
405	ctxld->ctx_size[0][CTX_SB_HP] = 0;
406	ctxld->ctx_size[0][CTX_SB_LP] = 0;
407
408	spin_unlock_irq(&ctxld->lock);
409
410	return ret;
411}
412
/* Lockdep helper: assert the caller holds ctxld->lock. */
void dcss_ctxld_assert_locked(struct dcss_ctxld *ctxld)
{
	lockdep_assert_held(&ctxld->lock);
}
417