1/* linux/arch/arm/common/pl330.c
2 *
3 * Copyright (C) 2010 Samsung Electronics Co Ltd.
4 *	Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/string.h>
26#include <linux/io.h>
27#include <linux/delay.h>
28#include <linux/interrupt.h>
29#include <linux/dma-mapping.h>
30
31#include <asm/hardware/pl330.h>
32
33/* Register and Bit field Definitions */
34#define DS		0x0
35#define DS_ST_STOP	0x0
36#define DS_ST_EXEC	0x1
37#define DS_ST_CMISS	0x2
38#define DS_ST_UPDTPC	0x3
39#define DS_ST_WFE	0x4
40#define DS_ST_ATBRR	0x5
41#define DS_ST_QBUSY	0x6
42#define DS_ST_WFP	0x7
43#define DS_ST_KILL	0x8
44#define DS_ST_CMPLT	0x9
45#define DS_ST_FLTCMP	0xe
46#define DS_ST_FAULT	0xf
47
48#define DPC		0x4
49#define INTEN		0x20
50#define ES		0x24
51#define INTSTATUS	0x28
52#define INTCLR		0x2c
53#define FSM		0x30
54#define FSC		0x34
55#define FTM		0x38
56
57#define _FTC		0x40
58#define FTC(n)		(_FTC + (n)*0x4)
59
60#define _CS		0x100
61#define CS(n)		(_CS + (n)*0x8)
62#define CS_CNS		(1 << 21)
63
64#define _CPC		0x104
65#define CPC(n)		(_CPC + (n)*0x8)
66
67#define _SA		0x400
68#define SA(n)		(_SA + (n)*0x20)
69
70#define _DA		0x404
71#define DA(n)		(_DA + (n)*0x20)
72
73#define _CC		0x408
74#define CC(n)		(_CC + (n)*0x20)
75
76#define CC_SRCINC	(1 << 0)
77#define CC_DSTINC	(1 << 14)
78#define CC_SRCPRI	(1 << 8)
79#define CC_DSTPRI	(1 << 22)
80#define CC_SRCNS	(1 << 9)
81#define CC_DSTNS	(1 << 23)
82#define CC_SRCIA	(1 << 10)
83#define CC_DSTIA	(1 << 24)
84#define CC_SRCBRSTLEN_SHFT	4
85#define CC_DSTBRSTLEN_SHFT	18
86#define CC_SRCBRSTSIZE_SHFT	1
87#define CC_DSTBRSTSIZE_SHFT	15
88#define CC_SRCCCTRL_SHFT	11
89#define CC_SRCCCTRL_MASK	0x7
90#define CC_DSTCCTRL_SHFT	25
#define CC_DSTCCTRL_MASK	0x7
92#define CC_SWAP_SHFT	28
93
94#define _LC0		0x40c
95#define LC0(n)		(_LC0 + (n)*0x20)
96
97#define _LC1		0x410
98#define LC1(n)		(_LC1 + (n)*0x20)
99
100#define DBGSTATUS	0xd00
101#define DBG_BUSY	(1 << 0)
102
103#define DBGCMD		0xd04
104#define DBGINST0	0xd08
105#define DBGINST1	0xd0c
106
107#define CR0		0xe00
108#define CR1		0xe04
109#define CR2		0xe08
110#define CR3		0xe0c
111#define CR4		0xe10
112#define CRD		0xe14
113
114#define PERIPH_ID	0xfe0
115#define PCELL_ID	0xff0
116
117#define CR0_PERIPH_REQ_SET	(1 << 0)
118#define CR0_BOOT_EN_SET		(1 << 1)
119#define CR0_BOOT_MAN_NS		(1 << 2)
120#define CR0_NUM_CHANS_SHIFT	4
121#define CR0_NUM_CHANS_MASK	0x7
122#define CR0_NUM_PERIPH_SHIFT	12
123#define CR0_NUM_PERIPH_MASK	0x1f
124#define CR0_NUM_EVENTS_SHIFT	17
125#define CR0_NUM_EVENTS_MASK	0x1f
126
127#define CR1_ICACHE_LEN_SHIFT	0
128#define CR1_ICACHE_LEN_MASK	0x7
129#define CR1_NUM_ICACHELINES_SHIFT	4
130#define CR1_NUM_ICACHELINES_MASK	0xf
131
132#define CRD_DATA_WIDTH_SHIFT	0
133#define CRD_DATA_WIDTH_MASK	0x7
134#define CRD_WR_CAP_SHIFT	4
135#define CRD_WR_CAP_MASK		0x7
136#define CRD_WR_Q_DEP_SHIFT	8
137#define CRD_WR_Q_DEP_MASK	0xf
138#define CRD_RD_CAP_SHIFT	12
139#define CRD_RD_CAP_MASK		0x7
140#define CRD_RD_Q_DEP_SHIFT	16
141#define CRD_RD_Q_DEP_MASK	0xf
142#define CRD_DATA_BUFF_SHIFT	20
143#define CRD_DATA_BUFF_MASK	0x3ff
144
145#define	PART		0x330
146#define DESIGNER	0x41
147#define REVISION	0x0
148#define INTEG_CFG	0x0
149#define PERIPH_ID_VAL	((PART << 0) | (DESIGNER << 12) \
150			  | (REVISION << 20) | (INTEG_CFG << 24))
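/* With the values above, PERIPH_ID_VAL evaluates to 0x00041330 */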
151
152#define PCELL_ID_VAL	0xb105f00d
153
154#define PL330_STATE_STOPPED		(1 << 0)
155#define PL330_STATE_EXECUTING		(1 << 1)
156#define PL330_STATE_WFE			(1 << 2)
157#define PL330_STATE_FAULTING		(1 << 3)
158#define PL330_STATE_COMPLETING		(1 << 4)
159#define PL330_STATE_WFP			(1 << 5)
160#define PL330_STATE_KILLING		(1 << 6)
161#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
162#define PL330_STATE_CACHEMISS		(1 << 8)
163#define PL330_STATE_UPDTPC		(1 << 9)
164#define PL330_STATE_ATBARRIER		(1 << 10)
165#define PL330_STATE_QUEUEBUSY		(1 << 11)
166#define PL330_STATE_INVALID		(1 << 15)
167
168#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
169				| PL330_STATE_WFE | PL330_STATE_FAULTING)
170
171#define CMD_DMAADDH	0x54
172#define CMD_DMAEND	0x00
173#define CMD_DMAFLUSHP	0x35
174#define CMD_DMAGO	0xa0
175#define CMD_DMALD	0x04
176#define CMD_DMALDP	0x25
177#define CMD_DMALP	0x20
178#define CMD_DMALPEND	0x28
179#define CMD_DMAKILL	0x01
180#define CMD_DMAMOV	0xbc
181#define CMD_DMANOP	0x18
182#define CMD_DMARMB	0x12
183#define CMD_DMASEV	0x34
184#define CMD_DMAST	0x08
185#define CMD_DMASTP	0x29
186#define CMD_DMASTZ	0x0c
187#define CMD_DMAWFE	0x36
188#define CMD_DMAWFP	0x30
189#define CMD_DMAWMB	0x13
190
191#define SZ_DMAADDH	3
192#define SZ_DMAEND	1
193#define SZ_DMAFLUSHP	2
194#define SZ_DMALD	1
195#define SZ_DMALDP	2
196#define SZ_DMALP	2
197#define SZ_DMALPEND	2
198#define SZ_DMAKILL	1
199#define SZ_DMAMOV	6
200#define SZ_DMANOP	1
201#define SZ_DMARMB	1
202#define SZ_DMASEV	2
203#define SZ_DMAST	1
204#define SZ_DMASTP	2
205#define SZ_DMASTZ	1
206#define SZ_DMAWFE	2
207#define SZ_DMAWFP	2
208#define SZ_DMAWMB	1
209#define SZ_DMAGO	6
210
211#define BRST_LEN(ccr)	((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
212#define BRST_SIZE(ccr)	(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))
213
214#define BYTE_TO_BURST(b, ccr)  ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
215#define BURST_TO_BYTE(c, ccr)  ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
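
/*
 * Worked example (illustrative only): for a ccr encoding a burst size of
 * 4 bytes (size code 2, i.e. 1 << 2) and a burst length of 8 (length code
 * 7, i.e. 7 + 1), BRST_SIZE(ccr) = 4 and BRST_LEN(ccr) = 8, so
 * BYTE_TO_BURST(4096, ccr) = 4096 / 4 / 8 = 128 bursts and
 * BURST_TO_BYTE(128, ccr) = 128 * 4 * 8 = 4096 bytes.
 */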
216
217/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1 byte/burst for P<->M and M<->M respectively.
 * For a typical scenario, at 1 word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
222 */
223#define MCODE_BUFF_PER_REQ	256
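
/*
 * Note: pl330_add() below defaults pi->mcbufsz to MCODE_BUFF_PER_REQ * 2,
 * since each channel thread keeps two outstanding requests and
 * _reset_thread() gives each of them half of the per-channel buffer.
 */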
224
225/*
226 * Mark a _pl330_req as free.
227 * We do it by writing DMAEND as the first instruction
228 * because no valid request is going to have DMAEND as
229 * its first instruction to execute.
230 */
231#define MARK_FREE(req)	do { \
232				_emit_END(0, (req)->mc_cpu); \
233				(req)->mc_len = 0; \
234			} while (0)
235
236/* If the _pl330_req is available to the client */
237#define IS_FREE(req)	(*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
238
239/* Use this _only_ to wait on transient states */
240#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();
241
242#ifdef PL330_DEBUG_MCGEN
243static unsigned cmd_line;
244#define PL330_DBGCMD_DUMP(off, x...)	do { \
245						printk("%x:", cmd_line); \
246						printk(x); \
247						cmd_line += off; \
248					} while (0)
249#define PL330_DBGMC_START(addr)		(cmd_line = addr)
250#else
251#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
252#define PL330_DBGMC_START(addr)		do {} while (0)
253#endif
254
255struct _xfer_spec {
256	u32 ccr;
257	struct pl330_req *r;
258	struct pl330_xfer *x;
259};
260
261enum dmamov_dst {
262	SAR = 0,
263	CCR,
264	DAR,
265};
266
267enum pl330_dst {
268	SRC = 0,
269	DST,
270};
271
272enum pl330_cond {
273	SINGLE,
274	BURST,
275	ALWAYS,
276};
277
278struct _pl330_req {
279	u32 mc_bus;
280	void *mc_cpu;
281	/* Number of bytes taken to setup MC for the req */
282	u32 mc_len;
283	struct pl330_req *r;
284	/* Hook to attach to DMAC's list of reqs with due callback */
285	struct list_head rqd;
286};
287
288/* ToBeDone for tasklet */
289struct _pl330_tbd {
290	bool reset_dmac;
291	bool reset_mngr;
292	u8 reset_chan;
293};
294
295/* A DMAC Thread */
296struct pl330_thread {
297	u8 id;
298	int ev;
299	/* If the channel is not yet acquired by any client */
300	bool free;
301	/* Parent DMAC */
302	struct pl330_dmac *dmac;
303	/* Only two at a time */
304	struct _pl330_req req[2];
305	/* Index of the last submitted request */
306	unsigned lstenq;
307};
308
309enum pl330_dmac_state {
310	UNINIT,
311	INIT,
312	DYING,
313};
314
315/* A DMAC */
316struct pl330_dmac {
317	spinlock_t		lock;
318	/* Holds list of reqs with due callbacks */
319	struct list_head	req_done;
320	/* Pointer to platform specific stuff */
321	struct pl330_info	*pinfo;
322	/* Maximum possible events/irqs */
323	int			events[32];
324	/* BUS address of MicroCode buffer */
325	u32			mcode_bus;
326	/* CPU address of MicroCode buffer */
327	void			*mcode_cpu;
328	/* List of all Channel threads */
329	struct pl330_thread	*channels;
330	/* Pointer to the MANAGER thread */
331	struct pl330_thread	*manager;
332	/* To handle bad news in interrupt */
333	struct tasklet_struct	tasks;
334	struct _pl330_tbd	dmac_tbd;
335	/* State of DMAC operation */
336	enum pl330_dmac_state	state;
337};
338
339static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
340{
341	if (r && r->xfer_cb)
342		r->xfer_cb(r->token, err);
343}
344
345static inline bool _queue_empty(struct pl330_thread *thrd)
346{
347	return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
348		? true : false;
349}
350
351static inline bool _queue_full(struct pl330_thread *thrd)
352{
353	return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
354		? false : true;
355}
356
357static inline bool is_manager(struct pl330_thread *thrd)
358{
359	struct pl330_dmac *pl330 = thrd->dmac;
360
361	/* MANAGER is indexed at the end */
362	if (thrd->id == pl330->pinfo->pcfg.num_chan)
363		return true;
364	else
365		return false;
366}
367
368/* If manager of the thread is in Non-Secure mode */
369static inline bool _manager_ns(struct pl330_thread *thrd)
370{
371	struct pl330_dmac *pl330 = thrd->dmac;
372
373	return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
374}
375
376static inline u32 get_id(struct pl330_info *pi, u32 off)
377{
378	void __iomem *regs = pi->base;
379	u32 id = 0;
380
381	id |= (readb(regs + off + 0x0) << 0);
382	id |= (readb(regs + off + 0x4) << 8);
383	id |= (readb(regs + off + 0x8) << 16);
384	id |= (readb(regs + off + 0xc) << 24);
385
386	return id;
387}
388
389static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
390		enum pl330_dst da, u16 val)
391{
392	if (dry_run)
393		return SZ_DMAADDH;
394
395	buf[0] = CMD_DMAADDH;
396	buf[0] |= (da << 1);
397	*((u16 *)&buf[1]) = val;
398
399	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
400		da == 1 ? "DA" : "SA", val);
401
402	return SZ_DMAADDH;
403}
404
405static inline u32 _emit_END(unsigned dry_run, u8 buf[])
406{
407	if (dry_run)
408		return SZ_DMAEND;
409
410	buf[0] = CMD_DMAEND;
411
412	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");
413
414	return SZ_DMAEND;
415}
416
417static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
418{
419	if (dry_run)
420		return SZ_DMAFLUSHP;
421
422	buf[0] = CMD_DMAFLUSHP;
423
424	peri &= 0x1f;
425	peri <<= 3;
426	buf[1] = peri;
427
428	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);
429
430	return SZ_DMAFLUSHP;
431}
432
433static inline u32 _emit_LD(unsigned dry_run, u8 buf[],	enum pl330_cond cond)
434{
435	if (dry_run)
436		return SZ_DMALD;
437
438	buf[0] = CMD_DMALD;
439
440	if (cond == SINGLE)
441		buf[0] |= (0 << 1) | (1 << 0);
442	else if (cond == BURST)
443		buf[0] |= (1 << 1) | (1 << 0);
444
445	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
446		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
447
448	return SZ_DMALD;
449}
450
451static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
452		enum pl330_cond cond, u8 peri)
453{
454	if (dry_run)
455		return SZ_DMALDP;
456
457	buf[0] = CMD_DMALDP;
458
459	if (cond == BURST)
460		buf[0] |= (1 << 1);
461
462	peri &= 0x1f;
463	peri <<= 3;
464	buf[1] = peri;
465
466	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
467		cond == SINGLE ? 'S' : 'B', peri >> 3);
468
469	return SZ_DMALDP;
470}
471
472static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
473		unsigned loop, u8 cnt)
474{
475	if (dry_run)
476		return SZ_DMALP;
477
478	buf[0] = CMD_DMALP;
479
480	if (loop)
481		buf[0] |= (1 << 1);
482
483	cnt--; /* DMAC increments by 1 internally */
484	buf[1] = cnt;
485
486	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);
487
488	return SZ_DMALP;
489}
490
491struct _arg_LPEND {
492	enum pl330_cond cond;
493	bool forever;
494	unsigned loop;
495	u8 bjump;
496};
497
498static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
499		const struct _arg_LPEND *arg)
500{
501	enum pl330_cond cond = arg->cond;
502	bool forever = arg->forever;
503	unsigned loop = arg->loop;
504	u8 bjump = arg->bjump;
505
506	if (dry_run)
507		return SZ_DMALPEND;
508
509	buf[0] = CMD_DMALPEND;
510
511	if (loop)
512		buf[0] |= (1 << 2);
513
514	if (!forever)
515		buf[0] |= (1 << 4);
516
517	if (cond == SINGLE)
518		buf[0] |= (0 << 1) | (1 << 0);
519	else if (cond == BURST)
520		buf[0] |= (1 << 1) | (1 << 0);
521
522	buf[1] = bjump;
523
524	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
525			forever ? "FE" : "END",
526			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
527			loop ? '1' : '0',
528			bjump);
529
530	return SZ_DMALPEND;
531}
532
533static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
534{
535	if (dry_run)
536		return SZ_DMAKILL;
537
538	buf[0] = CMD_DMAKILL;
539
540	return SZ_DMAKILL;
541}
542
543static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
544		enum dmamov_dst dst, u32 val)
545{
546	if (dry_run)
547		return SZ_DMAMOV;
548
549	buf[0] = CMD_DMAMOV;
550	buf[1] = dst;
551	*((u32 *)&buf[2]) = val;
552
553	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
554		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
555
556	return SZ_DMAMOV;
557}
558
559static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
560{
561	if (dry_run)
562		return SZ_DMANOP;
563
564	buf[0] = CMD_DMANOP;
565
566	PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");
567
568	return SZ_DMANOP;
569}
570
571static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
572{
573	if (dry_run)
574		return SZ_DMARMB;
575
576	buf[0] = CMD_DMARMB;
577
578	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");
579
580	return SZ_DMARMB;
581}
582
583static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
584{
585	if (dry_run)
586		return SZ_DMASEV;
587
588	buf[0] = CMD_DMASEV;
589
590	ev &= 0x1f;
591	ev <<= 3;
592	buf[1] = ev;
593
594	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);
595
596	return SZ_DMASEV;
597}
598
599static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
600{
601	if (dry_run)
602		return SZ_DMAST;
603
604	buf[0] = CMD_DMAST;
605
606	if (cond == SINGLE)
607		buf[0] |= (0 << 1) | (1 << 0);
608	else if (cond == BURST)
609		buf[0] |= (1 << 1) | (1 << 0);
610
611	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
612		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
613
614	return SZ_DMAST;
615}
616
617static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
618		enum pl330_cond cond, u8 peri)
619{
620	if (dry_run)
621		return SZ_DMASTP;
622
623	buf[0] = CMD_DMASTP;
624
625	if (cond == BURST)
626		buf[0] |= (1 << 1);
627
628	peri &= 0x1f;
629	peri <<= 3;
630	buf[1] = peri;
631
632	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
633		cond == SINGLE ? 'S' : 'B', peri >> 3);
634
635	return SZ_DMASTP;
636}
637
638static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
639{
640	if (dry_run)
641		return SZ_DMASTZ;
642
643	buf[0] = CMD_DMASTZ;
644
645	PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");
646
647	return SZ_DMASTZ;
648}
649
650static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
651		unsigned invalidate)
652{
653	if (dry_run)
654		return SZ_DMAWFE;
655
656	buf[0] = CMD_DMAWFE;
657
658	ev &= 0x1f;
659	ev <<= 3;
660	buf[1] = ev;
661
662	if (invalidate)
663		buf[1] |= (1 << 1);
664
665	PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
666		ev >> 3, invalidate ? ", I" : "");
667
668	return SZ_DMAWFE;
669}
670
671static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
672		enum pl330_cond cond, u8 peri)
673{
674	if (dry_run)
675		return SZ_DMAWFP;
676
677	buf[0] = CMD_DMAWFP;
678
679	if (cond == SINGLE)
680		buf[0] |= (0 << 1) | (0 << 0);
681	else if (cond == BURST)
682		buf[0] |= (1 << 1) | (0 << 0);
683	else
684		buf[0] |= (0 << 1) | (1 << 0);
685
686	peri &= 0x1f;
687	peri <<= 3;
688	buf[1] = peri;
689
690	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
691		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);
692
693	return SZ_DMAWFP;
694}
695
696static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
697{
698	if (dry_run)
699		return SZ_DMAWMB;
700
701	buf[0] = CMD_DMAWMB;
702
703	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");
704
705	return SZ_DMAWMB;
706}
707
708struct _arg_GO {
709	u8 chan;
710	u32 addr;
711	unsigned ns;
712};
713
714static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
715		const struct _arg_GO *arg)
716{
717	u8 chan = arg->chan;
718	u32 addr = arg->addr;
719	unsigned ns = arg->ns;
720
721	if (dry_run)
722		return SZ_DMAGO;
723
724	buf[0] = CMD_DMAGO;
725	buf[0] |= (ns << 1);
726
727	buf[1] = chan & 0x7;
728
729	*((u32 *)&buf[2]) = addr;
730
731	return SZ_DMAGO;
732}
733
734#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
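
/*
 * loops_per_jiffy * HZ is (roughly) the number of busy-wait iterations per
 * second, so the macro above approximates 't' milliseconds of cpu_relax()
 * spinning for _until_dmac_idle() below.
 */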
735
736/* Returns Time-Out */
737static bool _until_dmac_idle(struct pl330_thread *thrd)
738{
739	void __iomem *regs = thrd->dmac->pinfo->base;
740	unsigned long loops = msecs_to_loops(5);
741
742	do {
743		/* Until Manager is Idle */
744		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
745			break;
746
747		cpu_relax();
748	} while (--loops);
749
750	if (!loops)
751		return true;
752
753	return false;
754}
755
756static inline void _execute_DBGINSN(struct pl330_thread *thrd,
757		u8 insn[], bool as_manager)
758{
759	void __iomem *regs = thrd->dmac->pinfo->base;
760	u32 val;
761
762	val = (insn[0] << 16) | (insn[1] << 24);
763	if (!as_manager) {
764		val |= (1 << 0);
765		val |= (thrd->id << 8); /* Channel Number */
766	}
767	writel(val, regs + DBGINST0);
768
769	val = *((u32 *)&insn[2]);
770	writel(val, regs + DBGINST1);
771
772	/* If timed out due to halted state-machine */
773	if (_until_dmac_idle(thrd)) {
774		dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
775		return;
776	}
777
778	/* Get going */
779	writel(0, regs + DBGCMD);
780}
781
782static inline u32 _state(struct pl330_thread *thrd)
783{
784	void __iomem *regs = thrd->dmac->pinfo->base;
785	u32 val;
786
787	if (is_manager(thrd))
788		val = readl(regs + DS) & 0xf;
789	else
790		val = readl(regs + CS(thrd->id)) & 0xf;
791
792	switch (val) {
793	case DS_ST_STOP:
794		return PL330_STATE_STOPPED;
795	case DS_ST_EXEC:
796		return PL330_STATE_EXECUTING;
797	case DS_ST_CMISS:
798		return PL330_STATE_CACHEMISS;
799	case DS_ST_UPDTPC:
800		return PL330_STATE_UPDTPC;
801	case DS_ST_WFE:
802		return PL330_STATE_WFE;
803	case DS_ST_FAULT:
804		return PL330_STATE_FAULTING;
805	case DS_ST_ATBRR:
806		if (is_manager(thrd))
807			return PL330_STATE_INVALID;
808		else
809			return PL330_STATE_ATBARRIER;
810	case DS_ST_QBUSY:
811		if (is_manager(thrd))
812			return PL330_STATE_INVALID;
813		else
814			return PL330_STATE_QUEUEBUSY;
815	case DS_ST_WFP:
816		if (is_manager(thrd))
817			return PL330_STATE_INVALID;
818		else
819			return PL330_STATE_WFP;
820	case DS_ST_KILL:
821		if (is_manager(thrd))
822			return PL330_STATE_INVALID;
823		else
824			return PL330_STATE_KILLING;
825	case DS_ST_CMPLT:
826		if (is_manager(thrd))
827			return PL330_STATE_INVALID;
828		else
829			return PL330_STATE_COMPLETING;
830	case DS_ST_FLTCMP:
831		if (is_manager(thrd))
832			return PL330_STATE_INVALID;
833		else
834			return PL330_STATE_FAULT_COMPLETING;
835	default:
836		return PL330_STATE_INVALID;
837	}
838}
839
840/* If the request 'req' of thread 'thrd' is currently active */
841static inline bool _req_active(struct pl330_thread *thrd,
842		struct _pl330_req *req)
843{
844	void __iomem *regs = thrd->dmac->pinfo->base;
845	u32 buf = req->mc_bus, pc = readl(regs + CPC(thrd->id));
846
847	if (IS_FREE(req))
848		return false;
849
850	return (pc >= buf && pc <= buf + req->mc_len) ? true : false;
851}
852
853/* Returns 0 if the thread is inactive, ID of active req + 1 otherwise */
854static inline unsigned _thrd_active(struct pl330_thread *thrd)
855{
856	if (_req_active(thrd, &thrd->req[0]))
857		return 1; /* First req active */
858
859	if (_req_active(thrd, &thrd->req[1]))
860		return 2; /* Second req active */
861
862	return 0;
863}
864
865static void _stop(struct pl330_thread *thrd)
866{
867	void __iomem *regs = thrd->dmac->pinfo->base;
868	u8 insn[6] = {0, 0, 0, 0, 0, 0};
869
870	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
871		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
872
873	/* Return if nothing needs to be done */
874	if (_state(thrd) == PL330_STATE_COMPLETING
875		  || _state(thrd) == PL330_STATE_KILLING
876		  || _state(thrd) == PL330_STATE_STOPPED)
877		return;
878
879	_emit_KILL(0, insn);
880
881	/* Stop generating interrupts for SEV */
882	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
883
884	_execute_DBGINSN(thrd, insn, is_manager(thrd));
885}
886
887/* Start doing req 'idx' of thread 'thrd' */
888static bool _trigger(struct pl330_thread *thrd)
889{
890	void __iomem *regs = thrd->dmac->pinfo->base;
891	struct _pl330_req *req;
892	struct pl330_req *r;
893	struct _arg_GO go;
894	unsigned ns;
895	u8 insn[6] = {0, 0, 0, 0, 0, 0};
896
897	/* Return if already ACTIVE */
898	if (_state(thrd) != PL330_STATE_STOPPED)
899		return true;
900
901	if (!IS_FREE(&thrd->req[1 - thrd->lstenq]))
902		req = &thrd->req[1 - thrd->lstenq];
903	else if (!IS_FREE(&thrd->req[thrd->lstenq]))
904		req = &thrd->req[thrd->lstenq];
905	else
906		req = NULL;
907
908	/* Return if no request */
909	if (!req || !req->r)
910		return true;
911
912	r = req->r;
913
914	if (r->cfg)
915		ns = r->cfg->nonsecure ? 1 : 0;
916	else if (readl(regs + CS(thrd->id)) & CS_CNS)
917		ns = 1;
918	else
919		ns = 0;
920
921	/* See 'Abort Sources' point-4 at Page 2-25 */
922	if (_manager_ns(thrd) && !ns)
923		dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
924			__func__, __LINE__);
925
926	go.chan = thrd->id;
927	go.addr = req->mc_bus;
928	go.ns = ns;
929	_emit_GO(0, insn, &go);
930
931	/* Set to generate interrupts for SEV */
932	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);
933
934	/* Only manager can execute GO */
935	_execute_DBGINSN(thrd, insn, true);
936
937	return true;
938}
939
940static bool _start(struct pl330_thread *thrd)
941{
942	switch (_state(thrd)) {
943	case PL330_STATE_FAULT_COMPLETING:
944		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
945
946		if (_state(thrd) == PL330_STATE_KILLING)
947			UNTIL(thrd, PL330_STATE_STOPPED)
948
949	case PL330_STATE_FAULTING:
950		_stop(thrd);
951
952	case PL330_STATE_KILLING:
953	case PL330_STATE_COMPLETING:
954		UNTIL(thrd, PL330_STATE_STOPPED)
955
956	case PL330_STATE_STOPPED:
957		return _trigger(thrd);
958
959	case PL330_STATE_WFP:
960	case PL330_STATE_QUEUEBUSY:
961	case PL330_STATE_ATBARRIER:
962	case PL330_STATE_UPDTPC:
963	case PL330_STATE_CACHEMISS:
964	case PL330_STATE_EXECUTING:
965		return true;
966
967	case PL330_STATE_WFE: /* For RESUME, nothing yet */
968	default:
969		return false;
970	}
971}
972
973static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
974		const struct _xfer_spec *pxs, int cyc)
975{
976	int off = 0;
977
978	while (cyc--) {
979		off += _emit_LD(dry_run, &buf[off], ALWAYS);
980		off += _emit_RMB(dry_run, &buf[off]);
981		off += _emit_ST(dry_run, &buf[off], ALWAYS);
982		off += _emit_WMB(dry_run, &buf[off]);
983	}
984
985	return off;
986}
987
988static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
989		const struct _xfer_spec *pxs, int cyc)
990{
991	int off = 0;
992
993	while (cyc--) {
994		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
995		off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
996		off += _emit_ST(dry_run, &buf[off], ALWAYS);
997		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
998	}
999
1000	return off;
1001}
1002
1003static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
1004		const struct _xfer_spec *pxs, int cyc)
1005{
1006	int off = 0;
1007
1008	while (cyc--) {
1009		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
1010		off += _emit_LD(dry_run, &buf[off], ALWAYS);
1011		off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
1012		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
1013	}
1014
1015	return off;
1016}
1017
1018static int _bursts(unsigned dry_run, u8 buf[],
1019		const struct _xfer_spec *pxs, int cyc)
1020{
1021	int off = 0;
1022
1023	switch (pxs->r->rqtype) {
1024	case MEMTODEV:
1025		off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
1026		break;
1027	case DEVTOMEM:
1028		off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
1029		break;
1030	case MEMTOMEM:
1031		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
1032		break;
1033	default:
1034		off += 0x40000000; /* Scare off the Client */
1035		break;
1036	}
1037
1038	return off;
1039}
1040
1041/* Returns bytes consumed and updates bursts */
1042static inline int _loop(unsigned dry_run, u8 buf[],
1043		unsigned long *bursts, const struct _xfer_spec *pxs)
1044{
1045	int cyc, cycmax, szlp, szlpend, szbrst, off;
1046	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
1047	struct _arg_LPEND lpend;
1048
	/* Max iterations possible in DMALP is 256 */
1050	if (*bursts >= 256*256) {
1051		lcnt1 = 256;
1052		lcnt0 = 256;
1053		cyc = *bursts / lcnt1 / lcnt0;
1054	} else if (*bursts > 256) {
1055		lcnt1 = 256;
1056		lcnt0 = *bursts / lcnt1;
1057		cyc = 1;
1058	} else {
1059		lcnt1 = *bursts;
1060		lcnt0 = 0;
1061		cyc = 1;
1062	}
1063
1064	szlp = _emit_LP(1, buf, 0, 0);
1065	szbrst = _bursts(1, buf, pxs, 1);
1066
1067	lpend.cond = ALWAYS;
1068	lpend.forever = false;
1069	lpend.loop = 0;
1070	lpend.bjump = 0;
1071	szlpend = _emit_LPEND(1, buf, &lpend);
1072
1073	if (lcnt0) {
1074		szlp *= 2;
1075		szlpend *= 2;
1076	}
1077
	/*
	 * Max bursts that we can unroll due to the limit on the size of
	 * the backward jump that can be encoded in DMALPEND, which is
	 * 8 bits and hence 255 (see the worked example after this
	 * function).
	 */
1083	cycmax = (255 - (szlp + szlpend)) / szbrst;
1084
1085	cyc = (cycmax < cyc) ? cycmax : cyc;
1086
1087	off = 0;
1088
1089	if (lcnt0) {
1090		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
1091		ljmp0 = off;
1092	}
1093
1094	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
1095	ljmp1 = off;
1096
1097	off += _bursts(dry_run, &buf[off], pxs, cyc);
1098
1099	lpend.cond = ALWAYS;
1100	lpend.forever = false;
1101	lpend.loop = 1;
1102	lpend.bjump = off - ljmp1;
1103	off += _emit_LPEND(dry_run, &buf[off], &lpend);
1104
1105	if (lcnt0) {
1106		lpend.cond = ALWAYS;
1107		lpend.forever = false;
1108		lpend.loop = 0;
1109		lpend.bjump = off - ljmp0;
1110		off += _emit_LPEND(dry_run, &buf[off], &lpend);
1111	}
1112
1113	*bursts = lcnt1 * cyc;
1114	if (lcnt0)
1115		*bursts *= lcnt0;
1116
1117	return off;
1118}
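
/*
 * Worked example of the unrolling limit above, derived from the SZ_DMA*
 * values in this file: for MEMTOMEM one burst cycle emits LD+RMB+ST+WMB
 * = 4 bytes and, with both loops in use, szlp = szlpend = 4, so at most
 * (255 - 8) / 4 = 61 bursts are unrolled per loop body; for P<->M each
 * cycle is 7 bytes, giving (255 - 8) / 7 = 35.
 */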
1119
1120static inline int _setup_loops(unsigned dry_run, u8 buf[],
1121		const struct _xfer_spec *pxs)
1122{
1123	struct pl330_xfer *x = pxs->x;
1124	u32 ccr = pxs->ccr;
1125	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
1126	int off = 0;
1127
1128	while (bursts) {
1129		c = bursts;
1130		off += _loop(dry_run, &buf[off], &c, pxs);
1131		bursts -= c;
1132	}
1133
1134	return off;
1135}
1136
1137static inline int _setup_xfer(unsigned dry_run, u8 buf[],
1138		const struct _xfer_spec *pxs)
1139{
1140	struct pl330_xfer *x = pxs->x;
1141	int off = 0;
1142
1143	/* DMAMOV SAR, x->src_addr */
1144	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
1145	/* DMAMOV DAR, x->dst_addr */
1146	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
1147
1148	/* Setup Loop(s) */
1149	off += _setup_loops(dry_run, &buf[off], pxs);
1150
1151	return off;
1152}
1153
1154/*
1155 * A req is a sequence of one or more xfer units.
1156 * Returns the number of bytes taken to setup the MC for the req.
1157 */
1158static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
1159		unsigned index, struct _xfer_spec *pxs)
1160{
1161	struct _pl330_req *req = &thrd->req[index];
1162	struct pl330_xfer *x;
1163	u8 *buf = req->mc_cpu;
1164	int off = 0;
1165
1166	PL330_DBGMC_START(req->mc_bus);
1167
1168	/* DMAMOV CCR, ccr */
1169	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
1170
1171	x = pxs->r->x;
1172	do {
1173		/* Error if xfer length is not aligned at burst size */
1174		if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
1175			return -EINVAL;
1176
1177		pxs->x = x;
1178		off += _setup_xfer(dry_run, &buf[off], pxs);
1179
1180		x = x->next;
1181	} while (x);
1182
1183	/* DMASEV peripheral/event */
1184	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
1185	/* DMAEND */
1186	off += _emit_END(dry_run, &buf[off]);
1187
1188	return off;
1189}
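
/*
 * Illustrative shape of the generated microcode for one request:
 *
 *	DMAMOV CCR, ccr
 *	for each xfer:	DMAMOV SAR, src_addr
 *			DMAMOV DAR, dst_addr
 *			nested DMALP ... bursts ... DMALPEND loops
 *	DMASEV ev
 *	DMAEND
 */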
1190
1191static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
1192{
1193	u32 ccr = 0;
1194
1195	if (rqc->src_inc)
1196		ccr |= CC_SRCINC;
1197
1198	if (rqc->dst_inc)
1199		ccr |= CC_DSTINC;
1200
1201	/* We set same protection levels for Src and DST for now */
1202	if (rqc->privileged)
1203		ccr |= CC_SRCPRI | CC_DSTPRI;
1204	if (rqc->nonsecure)
1205		ccr |= CC_SRCNS | CC_DSTNS;
1206	if (rqc->insnaccess)
1207		ccr |= CC_SRCIA | CC_DSTIA;
1208
1209	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
1210	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);
1211
1212	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
1213	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
1214
	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
1217
1218	ccr |= (rqc->swap << CC_SWAP_SHFT);
1219
1220	return ccr;
1221}
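
/*
 * Illustrative example (values assumed, not taken from any client): a
 * reqcfg with src_inc = dst_inc = 1, brst_size = 2 (4-byte beats),
 * brst_len = 1 and every other field zero yields
 * ccr = CC_SRCINC | CC_DSTINC | (2 << CC_SRCBRSTSIZE_SHFT) |
 *       (2 << CC_DSTBRSTSIZE_SHFT) = 0x00014005.
 */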
1222
1223static inline bool _is_valid(u32 ccr)
1224{
1225	enum pl330_dstcachectrl dcctl;
1226	enum pl330_srccachectrl scctl;
1227
	dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DSTCCTRL_MASK;
1229	scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;
1230
1231	if (dcctl == DINVALID1 || dcctl == DINVALID2
1232			|| scctl == SINVALID1 || scctl == SINVALID2)
1233		return false;
1234	else
1235		return true;
1236}
1237
1238/*
1239 * Submit a list of xfers after which the client wants notification.
1240 * Client is not notified after each xfer unit, just once after all
1241 * xfer units are done or some error occurs.
1242 */
1243int pl330_submit_req(void *ch_id, struct pl330_req *r)
1244{
1245	struct pl330_thread *thrd = ch_id;
1246	struct pl330_dmac *pl330;
1247	struct pl330_info *pi;
1248	struct _xfer_spec xs;
1249	unsigned long flags;
1250	void __iomem *regs;
1251	unsigned idx;
1252	u32 ccr;
1253	int ret = 0;
1254
1255	/* No Req or Unacquired Channel or DMAC */
1256	if (!r || !thrd || thrd->free)
1257		return -EINVAL;
1258
1259	pl330 = thrd->dmac;
1260	pi = pl330->pinfo;
1261	regs = pi->base;
1262
1263	if (pl330->state == DYING
1264		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
1265		dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
1266			__func__, __LINE__);
1267		return -EAGAIN;
1268	}
1269
1270	/* If request for non-existing peripheral */
1271	if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
1272		dev_info(thrd->dmac->pinfo->dev,
1273				"%s:%d Invalid peripheral(%u)!\n",
1274				__func__, __LINE__, r->peri);
1275		return -EINVAL;
1276	}
1277
1278	spin_lock_irqsave(&pl330->lock, flags);
1279
1280	if (_queue_full(thrd)) {
1281		ret = -EAGAIN;
1282		goto xfer_exit;
1283	}
1284
	/* Use last settings, if not provided */
	if (r->cfg) {
		/* Prefer Secure Channel */
		if (!_manager_ns(thrd))
			r->cfg->nonsecure = 0;
		else
			r->cfg->nonsecure = 1;

		ccr = _prepare_ccr(r->cfg);
	} else {
		ccr = readl(regs + CC(thrd->id));
	}
1296
1297	/* If this req doesn't have valid xfer settings */
1298	if (!_is_valid(ccr)) {
1299		ret = -EINVAL;
1300		dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
1301			__func__, __LINE__, ccr);
1302		goto xfer_exit;
1303	}
1304
1305	idx = IS_FREE(&thrd->req[0]) ? 0 : 1;
1306
1307	xs.ccr = ccr;
1308	xs.r = r;
1309
1310	/* First dry run to check if req is acceptable */
1311	ret = _setup_req(1, thrd, idx, &xs);
1312	if (ret < 0)
1313		goto xfer_exit;
1314
1315	if (ret > pi->mcbufsz / 2) {
1316		dev_info(thrd->dmac->pinfo->dev,
			"%s:%d Try increasing mcbufsz\n",
1318				__func__, __LINE__);
1319		ret = -ENOMEM;
1320		goto xfer_exit;
1321	}
1322
1323	/* Hook the request */
1324	thrd->lstenq = idx;
1325	thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
1326	thrd->req[idx].r = r;
1327
1328	ret = 0;
1329
1330xfer_exit:
1331	spin_unlock_irqrestore(&pl330->lock, flags);
1332
1333	return ret;
1334}
1335EXPORT_SYMBOL(pl330_submit_req);
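
/*
 * Illustrative client usage sketch (kept out of the build; the field and
 * callback names below are taken from their usage in this file, and the
 * channel handle is assumed to come from pl330_request_channel()):
 */
#if 0
static void example_xfer_done(void *token, enum pl330_op_err err)
{
	/* e.g. complete() the waiting client identified by 'token' */
}

static int example_submit(void *ch_id, dma_addr_t src, dma_addr_t dst,
			  u32 len, void *token)
{
	/* Request and xfer must stay valid until the callback runs */
	static struct pl330_reqcfg cfg = {
		.src_inc = 1,
		.dst_inc = 1,
		.brst_size = 2,	/* 4-byte beats */
		.brst_len = 1,
	};
	static struct pl330_xfer x;
	static struct pl330_req r;
	int ret;

	x.src_addr = src;
	x.dst_addr = dst;
	x.bytes = len;
	x.next = NULL;

	r.rqtype = MEMTOMEM;
	r.cfg = &cfg;
	r.x = &x;
	r.xfer_cb = example_xfer_done;
	r.token = token;

	ret = pl330_submit_req(ch_id, &r);
	if (!ret)
		ret = pl330_chan_ctrl(ch_id, PL330_OP_START);

	return ret;
}
#endif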
1336
1337static void pl330_dotask(unsigned long data)
1338{
1339	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
1340	struct pl330_info *pi = pl330->pinfo;
1341	unsigned long flags;
1342	int i;
1343
1344	spin_lock_irqsave(&pl330->lock, flags);
1345
	/* The DMAC itself has gone nuts */
1347	if (pl330->dmac_tbd.reset_dmac) {
1348		pl330->state = DYING;
1349		/* Reset the manager too */
1350		pl330->dmac_tbd.reset_mngr = true;
1351		/* Clear the reset flag */
1352		pl330->dmac_tbd.reset_dmac = false;
1353	}
1354
1355	if (pl330->dmac_tbd.reset_mngr) {
1356		_stop(pl330->manager);
1357		/* Reset all channels */
1358		pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
1359		/* Clear the reset flag */
1360		pl330->dmac_tbd.reset_mngr = false;
1361	}
1362
1363	for (i = 0; i < pi->pcfg.num_chan; i++) {
1364
1365		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
1366			struct pl330_thread *thrd = &pl330->channels[i];
1367			void __iomem *regs = pi->base;
1368			enum pl330_op_err err;
1369
1370			_stop(thrd);
1371
1372			if (readl(regs + FSC) & (1 << thrd->id))
1373				err = PL330_ERR_FAIL;
1374			else
1375				err = PL330_ERR_ABORT;
1376
1377			spin_unlock_irqrestore(&pl330->lock, flags);
1378
1379			_callback(thrd->req[1 - thrd->lstenq].r, err);
1380			_callback(thrd->req[thrd->lstenq].r, err);
1381
1382			spin_lock_irqsave(&pl330->lock, flags);
1383
1384			thrd->req[0].r = NULL;
1385			thrd->req[1].r = NULL;
1386			MARK_FREE(&thrd->req[0]);
1387			MARK_FREE(&thrd->req[1]);
1388
1389			/* Clear the reset flag */
1390			pl330->dmac_tbd.reset_chan &= ~(1 << i);
1391		}
1392	}
1393
1394	spin_unlock_irqrestore(&pl330->lock, flags);
1395
1396	return;
1397}
1398
1399/* Returns 1 if state was updated, 0 otherwise */
1400int pl330_update(const struct pl330_info *pi)
1401{
1402	struct _pl330_req *rqdone;
1403	struct pl330_dmac *pl330;
1404	unsigned long flags;
1405	void __iomem *regs;
1406	u32 val;
1407	int id, ev, ret = 0;
1408
1409	if (!pi || !pi->pl330_data)
1410		return 0;
1411
1412	regs = pi->base;
1413	pl330 = pi->pl330_data;
1414
1415	spin_lock_irqsave(&pl330->lock, flags);
1416
1417	val = readl(regs + FSM) & 0x1;
1418	if (val)
1419		pl330->dmac_tbd.reset_mngr = true;
1420	else
1421		pl330->dmac_tbd.reset_mngr = false;
1422
1423	val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
1424	pl330->dmac_tbd.reset_chan |= val;
1425	if (val) {
1426		int i = 0;
1427		while (i < pi->pcfg.num_chan) {
1428			if (val & (1 << i)) {
1429				dev_info(pi->dev,
1430					"Reset Channel-%d\t CS-%x FTC-%x\n",
1431						i, readl(regs + CS(i)),
1432						readl(regs + FTC(i)));
1433				_stop(&pl330->channels[i]);
1434			}
1435			i++;
1436		}
1437	}
1438
	/* Check which event happened, i.e. which thread notified */
1440	val = readl(regs + ES);
1441	if (pi->pcfg.num_events < 32
1442			&& val & ~((1 << pi->pcfg.num_events) - 1)) {
1443		pl330->dmac_tbd.reset_dmac = true;
1444		dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
1445		ret = 1;
1446		goto updt_exit;
1447	}
1448
1449	for (ev = 0; ev < pi->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
1451			struct pl330_thread *thrd;
1452			u32 inten = readl(regs + INTEN);
1453			int active;
1454
1455			/* Clear the event */
1456			if (inten & (1 << ev))
1457				writel(1 << ev, regs + INTCLR);
1458
1459			ret = 1;
1460
1461			id = pl330->events[ev];
1462
1463			thrd = &pl330->channels[id];
1464
1465			active = _thrd_active(thrd);
1466			if (!active) /* Aborted */
1467				continue;
1468
1469			active -= 1;
1470
1471			rqdone = &thrd->req[active];
1472			MARK_FREE(rqdone);
1473
1474			/* Get going again ASAP */
1475			_start(thrd);
1476
1477			/* For now, just make a list of callbacks to be done */
1478			list_add_tail(&rqdone->rqd, &pl330->req_done);
1479		}
1480	}
1481
1482	/* Now that we are in no hurry, do the callbacks */
1483	while (!list_empty(&pl330->req_done)) {
1484		rqdone = container_of(pl330->req_done.next,
1485					struct _pl330_req, rqd);
1486
1487		list_del_init(&rqdone->rqd);
1488
1489		spin_unlock_irqrestore(&pl330->lock, flags);
1490		_callback(rqdone->r, PL330_ERR_NONE);
1491		spin_lock_irqsave(&pl330->lock, flags);
1492	}
1493
1494updt_exit:
1495	spin_unlock_irqrestore(&pl330->lock, flags);
1496
1497	if (pl330->dmac_tbd.reset_dmac
1498			|| pl330->dmac_tbd.reset_mngr
1499			|| pl330->dmac_tbd.reset_chan) {
1500		ret = 1;
1501		tasklet_schedule(&pl330->tasks);
1502	}
1503
1504	return ret;
1505}
1506EXPORT_SYMBOL(pl330_update);
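
/*
 * pl330_update() is expected to be called from the platform's DMAC
 * interrupt handler (an assumption based on its handling of ES/INTCLR
 * here); anything that needs heavier recovery is deferred to the
 * pl330_dotask() tasklet.
 */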
1507
1508int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
1509{
1510	struct pl330_thread *thrd = ch_id;
1511	struct pl330_dmac *pl330;
1512	unsigned long flags;
1513	int ret = 0, active;
1514
1515	if (!thrd || thrd->free || thrd->dmac->state == DYING)
1516		return -EINVAL;
1517
1518	pl330 = thrd->dmac;
1519
1520	spin_lock_irqsave(&pl330->lock, flags);
1521
1522	switch (op) {
1523	case PL330_OP_FLUSH:
1524		/* Make sure the channel is stopped */
1525		_stop(thrd);
1526
1527		thrd->req[0].r = NULL;
1528		thrd->req[1].r = NULL;
1529		MARK_FREE(&thrd->req[0]);
1530		MARK_FREE(&thrd->req[1]);
1531		break;
1532
1533	case PL330_OP_ABORT:
1534		active = _thrd_active(thrd);
1535
1536		/* Make sure the channel is stopped */
1537		_stop(thrd);
1538
1539		/* ABORT is only for the active req */
1540		if (!active)
1541			break;
1542
1543		active--;
1544
1545		thrd->req[active].r = NULL;
1546		MARK_FREE(&thrd->req[active]);
1547
1548		/* Start the next */
1549	case PL330_OP_START:
1550		if (!_start(thrd))
1551			ret = -EIO;
1552		break;
1553
1554	default:
1555		ret = -EINVAL;
1556	}
1557
1558	spin_unlock_irqrestore(&pl330->lock, flags);
1559	return ret;
1560}
1561EXPORT_SYMBOL(pl330_chan_ctrl);
1562
1563int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus)
1564{
1565	struct pl330_thread *thrd = ch_id;
1566	struct pl330_dmac *pl330;
1567	struct pl330_info *pi;
1568	void __iomem *regs;
1569	int active;
1570	u32 val;
1571
1572	if (!pstatus || !thrd || thrd->free)
1573		return -EINVAL;
1574
1575	pl330 = thrd->dmac;
1576	pi = pl330->pinfo;
1577	regs = pi->base;
1578
1579	/* The client should remove the DMAC and add again */
1580	if (pl330->state == DYING)
1581		pstatus->dmac_halted = true;
1582	else
1583		pstatus->dmac_halted = false;
1584
1585	val = readl(regs + FSC);
1586	if (val & (1 << thrd->id))
1587		pstatus->faulting = true;
1588	else
1589		pstatus->faulting = false;
1590
1591	active = _thrd_active(thrd);
1592
1593	if (!active) {
1594		/* Indicate that the thread is not running */
1595		pstatus->top_req = NULL;
1596		pstatus->wait_req = NULL;
1597	} else {
1598		active--;
1599		pstatus->top_req = thrd->req[active].r;
1600		pstatus->wait_req = !IS_FREE(&thrd->req[1 - active])
1601					? thrd->req[1 - active].r : NULL;
1602	}
1603
1604	pstatus->src_addr = readl(regs + SA(thrd->id));
1605	pstatus->dst_addr = readl(regs + DA(thrd->id));
1606
1607	return 0;
1608}
1609EXPORT_SYMBOL(pl330_chan_status);
1610
1611/* Reserve an event */
1612static inline int _alloc_event(struct pl330_thread *thrd)
1613{
1614	struct pl330_dmac *pl330 = thrd->dmac;
1615	struct pl330_info *pi = pl330->pinfo;
1616	int ev;
1617
1618	for (ev = 0; ev < pi->pcfg.num_events; ev++)
1619		if (pl330->events[ev] == -1) {
1620			pl330->events[ev] = thrd->id;
1621			return ev;
1622		}
1623
1624	return -1;
1625}
1626
1627/* Upon success, returns IdentityToken for the
1628 * allocated channel, NULL otherwise.
1629 */
1630void *pl330_request_channel(const struct pl330_info *pi)
1631{
1632	struct pl330_thread *thrd = NULL;
1633	struct pl330_dmac *pl330;
1634	unsigned long flags;
1635	int chans, i;
1636
1637	if (!pi || !pi->pl330_data)
1638		return NULL;
1639
1640	pl330 = pi->pl330_data;
1641
1642	if (pl330->state == DYING)
1643		return NULL;
1644
1645	chans = pi->pcfg.num_chan;
1646
1647	spin_lock_irqsave(&pl330->lock, flags);
1648
1649	for (i = 0; i < chans; i++) {
1650		thrd = &pl330->channels[i];
1651		if (thrd->free) {
1652			thrd->ev = _alloc_event(thrd);
1653			if (thrd->ev >= 0) {
1654				thrd->free = false;
1655				thrd->lstenq = 1;
1656				thrd->req[0].r = NULL;
1657				MARK_FREE(&thrd->req[0]);
1658				thrd->req[1].r = NULL;
1659				MARK_FREE(&thrd->req[1]);
1660				break;
1661			}
1662		}
1663		thrd = NULL;
1664	}
1665
1666	spin_unlock_irqrestore(&pl330->lock, flags);
1667
1668	return thrd;
1669}
1670EXPORT_SYMBOL(pl330_request_channel);
1671
1672/* Release an event */
1673static inline void _free_event(struct pl330_thread *thrd, int ev)
1674{
1675	struct pl330_dmac *pl330 = thrd->dmac;
1676	struct pl330_info *pi = pl330->pinfo;
1677
1678	/* If the event is valid and was held by the thread */
1679	if (ev >= 0 && ev < pi->pcfg.num_events
1680			&& pl330->events[ev] == thrd->id)
1681		pl330->events[ev] = -1;
1682}
1683
1684void pl330_release_channel(void *ch_id)
1685{
1686	struct pl330_thread *thrd = ch_id;
1687	struct pl330_dmac *pl330;
1688	unsigned long flags;
1689
1690	if (!thrd || thrd->free)
1691		return;
1692
1693	_stop(thrd);
1694
1695	_callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
1696	_callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);
1697
1698	pl330 = thrd->dmac;
1699
1700	spin_lock_irqsave(&pl330->lock, flags);
1701	_free_event(thrd, thrd->ev);
1702	thrd->free = true;
1703	spin_unlock_irqrestore(&pl330->lock, flags);
1704}
1705EXPORT_SYMBOL(pl330_release_channel);
1706
/* Initialize the structure for PL330 configuration, which can be used
 * by the client driver to make best use of the DMAC
1709 */
1710static void read_dmac_config(struct pl330_info *pi)
1711{
1712	void __iomem *regs = pi->base;
1713	u32 val;
1714
1715	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
1716	val &= CRD_DATA_WIDTH_MASK;
1717	pi->pcfg.data_bus_width = 8 * (1 << val);
1718
1719	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
1720	val &= CRD_DATA_BUFF_MASK;
1721	pi->pcfg.data_buf_dep = val + 1;
1722
1723	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
1724	val &= CR0_NUM_CHANS_MASK;
1725	val += 1;
1726	pi->pcfg.num_chan = val;
1727
1728	val = readl(regs + CR0);
1729	if (val & CR0_PERIPH_REQ_SET) {
1730		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
1731		val += 1;
1732		pi->pcfg.num_peri = val;
1733		pi->pcfg.peri_ns = readl(regs + CR4);
1734	} else {
1735		pi->pcfg.num_peri = 0;
1736	}
1737
1738	val = readl(regs + CR0);
1739	if (val & CR0_BOOT_MAN_NS)
1740		pi->pcfg.mode |= DMAC_MODE_NS;
1741	else
1742		pi->pcfg.mode &= ~DMAC_MODE_NS;
1743
1744	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
1745	val &= CR0_NUM_EVENTS_MASK;
1746	val += 1;
1747	pi->pcfg.num_events = val;
1748
1749	pi->pcfg.irq_ns = readl(regs + CR3);
1750
1751	pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
1752	pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
1753}
1754
1755static inline void _reset_thread(struct pl330_thread *thrd)
1756{
1757	struct pl330_dmac *pl330 = thrd->dmac;
1758	struct pl330_info *pi = pl330->pinfo;
1759
1760	thrd->req[0].mc_cpu = pl330->mcode_cpu
1761				+ (thrd->id * pi->mcbufsz);
1762	thrd->req[0].mc_bus = pl330->mcode_bus
1763				+ (thrd->id * pi->mcbufsz);
1764	thrd->req[0].r = NULL;
1765	MARK_FREE(&thrd->req[0]);
1766
1767	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
1768				+ pi->mcbufsz / 2;
1769	thrd->req[1].mc_bus = thrd->req[0].mc_bus
1770				+ pi->mcbufsz / 2;
1771	thrd->req[1].r = NULL;
1772	MARK_FREE(&thrd->req[1]);
1773}
1774
1775static int dmac_alloc_threads(struct pl330_dmac *pl330)
1776{
1777	struct pl330_info *pi = pl330->pinfo;
1778	int chans = pi->pcfg.num_chan;
1779	struct pl330_thread *thrd;
1780	int i;
1781
1782	/* Allocate 1 Manager and 'chans' Channel threads */
1783	pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
1784					GFP_KERNEL);
1785	if (!pl330->channels)
1786		return -ENOMEM;
1787
1788	/* Init Channel threads */
1789	for (i = 0; i < chans; i++) {
1790		thrd = &pl330->channels[i];
1791		thrd->id = i;
1792		thrd->dmac = pl330;
1793		_reset_thread(thrd);
1794		thrd->free = true;
1795	}
1796
1797	/* MANAGER is indexed at the end */
1798	thrd = &pl330->channels[chans];
1799	thrd->id = chans;
1800	thrd->dmac = pl330;
1801	thrd->free = false;
1802	pl330->manager = thrd;
1803
1804	return 0;
1805}
1806
1807static int dmac_alloc_resources(struct pl330_dmac *pl330)
1808{
1809	struct pl330_info *pi = pl330->pinfo;
1810	int chans = pi->pcfg.num_chan;
1811	int ret;
1812
1813	/*
1814	 * Alloc MicroCode buffer for 'chans' Channel threads.
1815	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
1816	 */
1817	pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
1818				chans * pi->mcbufsz,
1819				&pl330->mcode_bus, GFP_KERNEL);
1820	if (!pl330->mcode_cpu) {
1821		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
1822			__func__, __LINE__);
1823		return -ENOMEM;
1824	}
1825
1826	ret = dmac_alloc_threads(pl330);
1827	if (ret) {
		dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n",
1829			__func__, __LINE__);
1830		dma_free_coherent(pi->dev,
1831				chans * pi->mcbufsz,
1832				pl330->mcode_cpu, pl330->mcode_bus);
1833		return ret;
1834	}
1835
1836	return 0;
1837}
1838
1839int pl330_add(struct pl330_info *pi)
1840{
1841	struct pl330_dmac *pl330;
1842	void __iomem *regs;
1843	int i, ret;
1844
1845	if (!pi || !pi->dev)
1846		return -EINVAL;
1847
1848	/* If already added */
1849	if (pi->pl330_data)
1850		return -EINVAL;
1851
1852	/*
1853	 * If the SoC can perform reset on the DMAC, then do it
1854	 * before reading its configuration.
1855	 */
1856	if (pi->dmac_reset)
1857		pi->dmac_reset(pi);
1858
1859	regs = pi->base;
1860
1861	/* Check if we can handle this DMAC */
1862	if (get_id(pi, PERIPH_ID) != PERIPH_ID_VAL
1863	   || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
1864		dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
1865			readl(regs + PERIPH_ID), readl(regs + PCELL_ID));
1866		return -EINVAL;
1867	}
1868
1869	/* Read the configuration of the DMAC */
1870	read_dmac_config(pi);
1871
1872	if (pi->pcfg.num_events == 0) {
1873		dev_err(pi->dev, "%s:%d Can't work without events!\n",
1874			__func__, __LINE__);
1875		return -EINVAL;
1876	}
1877
1878	pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
1879	if (!pl330) {
1880		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
1881			__func__, __LINE__);
1882		return -ENOMEM;
1883	}
1884
1885	/* Assign the info structure and private data */
1886	pl330->pinfo = pi;
1887	pi->pl330_data = pl330;
1888
1889	spin_lock_init(&pl330->lock);
1890
1891	INIT_LIST_HEAD(&pl330->req_done);
1892
1893	/* Use default MC buffer size if not provided */
1894	if (!pi->mcbufsz)
1895		pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;
1896
1897	/* Mark all events as free */
1898	for (i = 0; i < pi->pcfg.num_events; i++)
1899		pl330->events[i] = -1;
1900
1901	/* Allocate resources needed by the DMAC */
1902	ret = dmac_alloc_resources(pl330);
1903	if (ret) {
1904		dev_err(pi->dev, "Unable to create channels for DMAC\n");
1905		kfree(pl330);
1906		return ret;
1907	}
1908
1909	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);
1910
1911	pl330->state = INIT;
1912
1913	return 0;
1914}
1915EXPORT_SYMBOL(pl330_add);
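
/*
 * Typical lifecycle (sketch only; the exact pl330_info layout lives in
 * <asm/hardware/pl330.h>): a platform driver fills a struct pl330_info
 * with .dev, .base (ioremapped registers) and optionally .mcbufsz and
 * .dmac_reset, calls pl330_add() once at probe time, and pl330_del()
 * on removal.
 */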
1916
1917static int dmac_free_threads(struct pl330_dmac *pl330)
1918{
1919	struct pl330_info *pi = pl330->pinfo;
1920	int chans = pi->pcfg.num_chan;
1921	struct pl330_thread *thrd;
1922	int i;
1923
1924	/* Release Channel threads */
1925	for (i = 0; i < chans; i++) {
1926		thrd = &pl330->channels[i];
1927		pl330_release_channel((void *)thrd);
1928	}
1929
1930	/* Free memory */
1931	kfree(pl330->channels);
1932
1933	return 0;
1934}
1935
1936static void dmac_free_resources(struct pl330_dmac *pl330)
1937{
1938	struct pl330_info *pi = pl330->pinfo;
1939	int chans = pi->pcfg.num_chan;
1940
1941	dmac_free_threads(pl330);
1942
1943	dma_free_coherent(pi->dev, chans * pi->mcbufsz,
1944				pl330->mcode_cpu, pl330->mcode_bus);
1945}
1946
1947void pl330_del(struct pl330_info *pi)
1948{
1949	struct pl330_dmac *pl330;
1950
1951	if (!pi || !pi->pl330_data)
1952		return;
1953
1954	pl330 = pi->pl330_data;
1955
1956	pl330->state = UNINIT;
1957
1958	tasklet_kill(&pl330->tasks);
1959
1960	/* Free DMAC resources */
1961	dmac_free_resources(pl330);
1962
1963	kfree(pl330);
1964	pi->pl330_data = NULL;
1965}
1966EXPORT_SYMBOL(pl330_del);
1967