/*
 *  intel_mid_dma.c - Intel Langwell DMA Drivers
 *
 *  Copyright (C) 2008-10 Intel Corp
 *  Author: Vinod Koul <vinod.koul@intel.com>
 *  The driver design is based on dw_dmac driver
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/intel_mid_dma.h>

#define MAX_CHAN	4 /*max ch across controllers*/
#include "intel_mid_dma_regs.h"

#define INTEL_MID_DMAC1_ID		0x0814
#define INTEL_MID_DMAC2_ID		0x0813
#define INTEL_MID_GP_DMAC2_ID		0x0827
#define INTEL_MFLD_DMAC1_ID		0x0830
#define LNW_PERIPHRAL_MASK_BASE		0xFFAE8008
#define LNW_PERIPHRAL_MASK_SIZE		0x10
#define LNW_PERIPHRAL_STATUS		0x0
#define LNW_PERIPHRAL_MASK		0x8

struct intel_mid_dma_probe_info {
	u8 max_chan;
	u8 ch_base;
	u16 block_size;
	u32 pimr_mask;
};

#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
	((kernel_ulong_t)&(struct intel_mid_dma_probe_info) {	\
		.max_chan = (_max_chan),			\
		.ch_base = (_ch_base),				\
		.block_size = (_block_size),			\
		.pimr_mask = (_pimr_mask),			\
	})
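
/*
 * Example (illustrative only): the DMAC1 entry in the PCI id table at
 * the bottom of this file, INFO(2, 6, 4095, 0x200020), packs a pointer
 * to
 *	{ .max_chan = 2, .ch_base = 6, .block_size = 4095,
 *	  .pimr_mask = 0x200020 }
 * into driver_data: two channels starting at hardware channel 6, a
 * 4095-item max block, and peripheral-interrupt mask bits 0x200020.
 */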

/*****************************************************************************
Utility Functions*/
/**
 * get_ch_index	-	convert status to channel
 * @status: status mask
 * @base: dma ch base value
 *
 * Modify the status mask and return the channel index needing
 * attention (or -1 if none)
 */
static int get_ch_index(int *status, unsigned int base)
{
	int i;
	for (i = 0; i < MAX_CHAN; i++) {
		if (*status & (1 << (i + base))) {
			*status = *status & ~(1 << (i + base));
			pr_debug("MDMA: index %d New status %x\n", i, *status);
			return i;
		}
	}
	return -1;
}
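
/*
 * Worked example (illustrative only): with base = 6 and *status = 0x40
 * (channel bit 6 set), the first iteration matches i = 0, clears bit 6
 * so *status becomes 0, and 0 is returned as the channel index.
 */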

/**
 * get_block_ts	-	calculates dma transaction length
 * @len: dma transfer length
 * @tx_width: dma transfer src width
 * @block_size: dma controller max block size
 *
 * Based on src width, calculate the DMA transaction length in data items.
 * Returns the number of data items, or 0xFFFF if it exceeds the max block
 * length.
 */
static int get_block_ts(int len, int tx_width, int block_size)
{
	int byte_width = 0, block_ts = 0;

	switch (tx_width) {
	case LNW_DMA_WIDTH_8BIT:
		byte_width = 1;
		break;
	case LNW_DMA_WIDTH_16BIT:
		byte_width = 2;
		break;
	case LNW_DMA_WIDTH_32BIT:
	default:
		byte_width = 4;
		break;
	}

	block_ts = len/byte_width;
	if (block_ts > block_size)
		block_ts = 0xFFFF;
	return block_ts;
}
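
/*
 * Worked example (illustrative only): len = 4096 bytes at
 * LNW_DMA_WIDTH_16BIT gives byte_width = 2, so block_ts = 2048 data
 * items; against a DMAC2-style block_size of 2047 that exceeds the
 * limit and 0xFFFF is returned instead.
 */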

/*****************************************************************************
DMAC1 interrupt Functions*/

/**
 * dmac1_mask_periphral_intr -	mask the peripheral interrupt
 * @midc: dma channel for which masking is required
 *
 * Masks the DMA peripheral interrupt.
 * This is valid for DMAC1 family controllers only.
 * The controller should have its peripheral mask registers already mapped.
 */
static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc)
{
	u32 pimr;
	struct middma_device *mid = to_middma_device(midc->chan.device);

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
		pimr |= mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
	}
	return;
}

/**
 * dmac1_unmask_periphral_intr -	unmask the peripheral interrupt
 * @midc: dma channel for which unmasking is required
 *
 * Unmasks the DMA peripheral interrupt.
 * This is valid for DMAC1 family controllers only.
 * The controller should have its peripheral mask registers already mapped.
 */
static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
{
	u32 pimr;
	struct middma_device *mid = to_middma_device(midc->chan.device);

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
		pimr &= ~mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
	}
	return;
}

/**
 * enable_dma_interrupt -	enable the peripheral interrupt
 * @midc: dma channel for which the interrupt is to be enabled
 *
 * Enables the DMA peripheral interrupt.
 * This is valid for DMAC1 family controllers only.
 * The controller should have its peripheral mask registers already mapped.
 */
static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	dmac1_unmask_periphral_intr(midc);

	/*enable channel interrupts*/
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
	return;
}

/**
 * disable_dma_interrupt -	disable the peripheral interrupt
 * @midc: dma channel for which the interrupt is to be disabled
 *
 * Disables the DMA peripheral interrupt.
 * This is valid for DMAC1 family controllers only.
 * The controller should have its peripheral mask registers already mapped.
 */
static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	/*Check LPE PISR, make sure fwd is disabled*/
	dmac1_mask_periphral_intr(midc);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
	return;
}

/*****************************************************************************
DMA channel helper Functions*/
/**
 * midc_desc_get	-	get a descriptor
 * @midc: dma channel for which a descriptor is required
 *
 * Obtain a descriptor for the channel. Returns NULL if none are free.
 * Once the descriptor is returned it is private until put on another
 * list or freed
 */
static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc, *_desc;
	struct intel_mid_dma_desc *ret = NULL;

	spin_lock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
	}
	spin_unlock_bh(&midc->lock);
	return ret;
}

/**
 * midc_desc_put	-	put a descriptor
 * @midc: dma channel to which the descriptor belongs
 * @desc: descriptor to put
 *
 * Return a descriptor from midc_desc_get back to the free pool
 */
static void midc_desc_put(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *desc)
{
	if (desc) {
		spin_lock_bh(&midc->lock);
		list_add_tail(&desc->desc_node, &midc->free_list);
		spin_unlock_bh(&midc->lock);
	}
}
/**
 * midc_dostart		-	begin a DMA transaction
 * @midc: channel for which txn is to be started
 * @first: first descriptor of series
 *
 * Load a transaction into the engine. This must be called with midc->lock
 * held and bh disabled.
 */
static void midc_dostart(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *first)
{
	struct middma_device *mid = to_middma_device(midc->chan.device);

	/* ASSERT: channel should be idle */
	if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) {
		/*error*/
		pr_err("ERR_MDMA: channel is busy in start\n");
		/* The tasklet will hopefully advance the queue... */
		return;
	}

	/*write registers and enable*/
	iowrite32(first->sar, midc->ch_regs + SAR);
	iowrite32(first->dar, midc->ch_regs + DAR);
	iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
	iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
	iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
	pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
		(int)first->sar, (int)first->dar, first->cfg_hi,
		first->cfg_lo, first->ctl_hi, first->ctl_lo);

	iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
	first->status = DMA_IN_PROGRESS;
}

/**
 * midc_descriptor_complete	-	process completed descriptor
 * @midc: channel owning the descriptor
 * @desc: the descriptor itself
 *
 * Process a completed descriptor and perform any callbacks upon
 * the completion. The completion handling drops the lock during the
 * callbacks but must be called with the lock held.
 */
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
	       struct intel_mid_dma_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	dma_async_tx_callback callback_txd = NULL;
	void *param_txd = NULL;

	midc->completed = txd->cookie;
	callback_txd = txd->callback;
	param_txd = txd->callback_param;

	list_move(&desc->desc_node, &midc->free_list);

	spin_unlock_bh(&midc->lock);
	if (callback_txd) {
		pr_debug("MDMA: TXD callback set ... calling\n");
		callback_txd(param_txd);
	}
	spin_lock_bh(&midc->lock);
}
/**
 * midc_scan_descriptors -	check the descriptors in channel
 *					mark completed when tx is complete
 * @mid: device
 * @midc: channel to scan
 *
 * Walk the descriptor chain for the device and process any entries
 * that are complete.
 */
static void midc_scan_descriptors(struct middma_device *mid,
				struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;

	/*tx is complete*/
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->status == DMA_IN_PROGRESS) {
			desc->status = DMA_SUCCESS;
			midc_descriptor_complete(midc, desc);
		}
	}
	return;
}

/*****************************************************************************
DMA engine callback Functions*/
/**
 * intel_mid_dma_tx_submit -	callback to submit DMA transaction
 * @tx: dma engine descriptor
 *
 * Submit the DMA transaction for this descriptor, start if ch idle
 */
static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct intel_mid_dma_desc	*desc = to_intel_mid_dma_desc(tx);
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&midc->lock);
	cookie = midc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	midc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	if (list_empty(&midc->active_list)) {
		midc_dostart(midc, desc);
		list_add_tail(&desc->desc_node, &midc->active_list);
	} else {
		list_add_tail(&desc->desc_node, &midc->queue);
	}
	spin_unlock_bh(&midc->lock);

	return cookie;
}
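
/*
 * Note on the cookie arithmetic above (illustrative): cookies count up
 * from 1, and if ++cookie ever wraps to a negative value it is reset
 * to 1, so a valid cookie is always positive and can be compared
 * against midc->completed in intel_mid_dma_tx_status() below.
 */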

/**
 * intel_mid_dma_issue_pending -	callback to issue pending txn
 * @chan: chan where the pending transaction needs to be checked and submitted
 *
 * Call for scan to issue pending descriptors
 */
static void intel_mid_dma_issue_pending(struct dma_chan *chan)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);

	spin_lock_bh(&midc->lock);
	if (!list_empty(&midc->queue))
		midc_scan_descriptors(to_middma_device(chan->device), midc);
	spin_unlock_bh(&midc->lock);
}

/**
 * intel_mid_dma_tx_status -	Return status of txn
 * @chan: chan where status needs to be checked
 * @cookie: cookie for txn
 * @txstate: DMA txn state
 *
 * Return status of DMA txn
 */
static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	int				ret;

	last_complete = midc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		midc_scan_descriptors(to_middma_device(chan->device), midc);

		last_complete = midc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (txstate) {
		txstate->last = last_complete;
		txstate->used = last_used;
		txstate->residue = 0;
	}
	return ret;
}

/**
 * intel_mid_dma_device_control -	DMA device control
 * @chan: chan for DMA control
 * @cmd: control cmd
 * @arg: cmd arg value
 *
 * Perform DMA control command
 */
static int intel_mid_dma_device_control(struct dma_chan *chan,
			enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc, *_desc;
	LIST_HEAD(list);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&midc->lock);
	if (midc->in_use == false) {
		spin_unlock_bh(&midc->lock);
		return 0;
	}
	list_splice_init(&midc->free_list, &list);
	midc->descs_allocated = 0;
	midc->slave = NULL;

	/* Disable interrupts */
	disable_dma_interrupt(midc);

	spin_unlock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		pr_debug("MDMA: freeing descriptor %p\n", desc);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	return 0;
}

/**
 * intel_mid_dma_prep_slave_sg -	Prep slave sg txn
 * @chan: chan for DMA transfer
 * @sgl: scatter gather list
 * @sg_len: length of sg txn
 * @direction: DMA transfer direction
 * @flags: DMA flags
 *
 * Do DMA sg txn: NOT supported now
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
			struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_data_direction direction,
			unsigned long flags)
{
	/*not supported now*/
	return NULL;
}

/**
 * intel_mid_dma_prep_memcpy -	Prep memcpy txn
 * @chan: chan for DMA transfer
 * @dest: destination address
 * @src: src address
 * @len: DMA transfer len
 * @flags: DMA flags
 *
 * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only.
 * The peripheral txn details should be filled in the slave structure properly.
 * Returns the descriptor for this txn
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
			struct dma_chan *chan, dma_addr_t dest,
			dma_addr_t src, size_t len, unsigned long flags)
{
	struct intel_mid_dma_chan *midc;
	struct intel_mid_dma_desc *desc = NULL;
	struct intel_mid_dma_slave *mids;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	union intel_mid_dma_cfg_lo cfg_lo;
	union intel_mid_dma_cfg_hi cfg_hi;
	enum intel_mid_dma_width width = 0;

	pr_debug("MDMA: Prep for memcpy\n");
	WARN_ON(!chan);
	if (!len)
		return NULL;

	mids = chan->private;
	WARN_ON(!mids);

	midc = to_intel_mid_dma_chan(chan);
	WARN_ON(!midc);

	pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
				midc->dma->pci_id, midc->ch_id, len);
	pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
		mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width);

	/*calculate CFG_LO*/
	if (mids->hs_mode == LNW_DMA_SW_HS) {
		cfg_lo.cfg_lo = 0;
		cfg_lo.cfgx.hs_sel_dst = 1;
		cfg_lo.cfgx.hs_sel_src = 1;
	} else if (mids->hs_mode == LNW_DMA_HW_HS)
		cfg_lo.cfg_lo = 0x00000;

	/*calculate CFG_HI*/
	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		/*SW HS only*/
		cfg_hi.cfg_hi = 0;
	} else {
		cfg_hi.cfg_hi = 0;
		if (midc->dma->pimr_mask) {
			cfg_hi.cfgx.protctl = 0x0; /*default value*/
			cfg_hi.cfgx.fifo_mode = 1;
			if (mids->dirn == DMA_TO_DEVICE) {
				cfg_hi.cfgx.src_per = 0;
				if (mids->device_instance == 0)
					cfg_hi.cfgx.dst_per = 3;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.dst_per = 1;
			} else if (mids->dirn == DMA_FROM_DEVICE) {
				if (mids->device_instance == 0)
					cfg_hi.cfgx.src_per = 2;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.src_per = 0;
				cfg_hi.cfgx.dst_per = 0;
			}
		} else {
			cfg_hi.cfgx.protctl = 0x1; /*default value*/
			cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
					midc->ch_id - midc->dma->chan_base;
		}
	}

	/*calculate CTL_HI*/
	ctl_hi.ctlx.reser = 0;
	width = mids->src_width;

	ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
	pr_debug("MDMA:calc len %d for block size %d\n",
				ctl_hi.ctlx.block_ts, midc->dma->block_size);
	/*calculate CTL_LO*/
	ctl_lo.ctl_lo = 0;
	ctl_lo.ctlx.int_en = 1;
	ctl_lo.ctlx.dst_tr_width = mids->dst_width;
	ctl_lo.ctlx.src_tr_width = mids->src_width;
	ctl_lo.ctlx.dst_msize = mids->src_msize;
	ctl_lo.ctlx.src_msize = mids->dst_msize;

	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		ctl_lo.ctlx.tt_fc = 0;
		ctl_lo.ctlx.sinc = 0;
		ctl_lo.ctlx.dinc = 0;
	} else {
		if (mids->dirn == DMA_TO_DEVICE) {
			ctl_lo.ctlx.sinc = 0;
			ctl_lo.ctlx.dinc = 2;
			ctl_lo.ctlx.tt_fc = 1;
		} else if (mids->dirn == DMA_FROM_DEVICE) {
			ctl_lo.ctlx.sinc = 2;
			ctl_lo.ctlx.dinc = 0;
			ctl_lo.ctlx.tt_fc = 2;
		}
	}

	pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
		ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);

	enable_dma_interrupt(midc);

	desc = midc_desc_get(midc);
	if (desc == NULL)
		goto err_desc_get;
	desc->sar = src;
	desc->dar = dest;
	desc->len = len;
	desc->cfg_hi = cfg_hi.cfg_hi;
	desc->cfg_lo = cfg_lo.cfg_lo;
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->ctl_hi = ctl_hi.ctl_hi;
	desc->width = width;
	desc->dirn = mids->dirn;
	return &desc->txd;

err_desc_get:
	pr_err("ERR_MDMA: Failed to get desc\n");
	midc_desc_put(midc, desc);
	return NULL;
}
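
/*
 * Usage sketch (illustrative only, not part of this driver): a slave
 * client is expected to hang a filled-in struct intel_mid_dma_slave
 * off chan->private before preparing a transfer, roughly:
 *
 *	struct intel_mid_dma_slave mids = {
 *		.cfg_mode  = LNW_DMA_PER_TO_MEM,	(peripheral to memory)
 *		.dirn      = DMA_FROM_DEVICE,
 *		.hs_mode   = LNW_DMA_HW_HS,
 *		.src_width = LNW_DMA_WIDTH_32BIT,
 *		.dst_width = LNW_DMA_WIDTH_32BIT,
 *	};
 *	chan->private = &mids;
 *	desc = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						     DMA_PREP_INTERRUPT);
 *	if (desc)
 *		desc->tx_submit(desc);
 *
 * The field names here are the ones this function reads from
 * chan->private; LNW_DMA_PER_TO_MEM is assumed from the mem-to-mem vs
 * peripheral split tested above, so check intel_mid_dma.h for the
 * exact enum names before relying on this.
 */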

/**
 * intel_mid_dma_free_chan_resources -	Frees dma resources
 * @chan: chan requiring attention
 *
 * Frees the allocated resources on this DMA chan
 */
static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc, *_desc;

	if (midc->in_use)
		pr_err("ERR_MDMA: trying to free ch in use\n");

	spin_lock_bh(&midc->lock);
	midc->descs_allocated = 0;
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = false;
	/* Disable CH interrupts */
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
}

/**
 * intel_mid_dma_alloc_chan_resources -	Allocate dma resources
 * @chan: chan requiring attention
 *
 * Allocates DMA resources on this chan
 * Return the descriptors allocated
 */
static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc;
	dma_addr_t		phys;
	int	i = 0;

	/* ASSERT: channel is idle */
	if (test_ch_en(mid->dma_base, midc->ch_id)) {
		/*ch is not idle*/
		pr_err("ERR_MDMA: ch not idle\n");
		return -EIO;
	}
	midc->completed = chan->cookie = 1;

	spin_lock_bh(&midc->lock);
	while (midc->descs_allocated < DESCS_PER_CHANNEL) {
		spin_unlock_bh(&midc->lock);
		desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
		if (!desc) {
			pr_err("ERR_MDMA: desc failed\n");
			return -ENOMEM;
		}
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = intel_mid_dma_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = phys;
		spin_lock_bh(&midc->lock);
		i = ++midc->descs_allocated;
		list_add_tail(&desc->desc_node, &midc->free_list);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = false;
	pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
	return i;
}

/**
 * midc_handle_error -	Handle DMA txn error
 * @mid: controller where the error occurred
 * @midc: chan where the error occurred
 *
 * Scan the descriptors for errors
 */
static void midc_handle_error(struct middma_device *mid,
		struct intel_mid_dma_chan *midc)
{
	midc_scan_descriptors(mid, midc);
}

/**
 * dma_tasklet -	DMA interrupt tasklet
 * @data: tasklet arg (the controller structure)
 *
 * Scan the controller for completion/error interrupts,
 * clear the interrupt and call for handling completion/error
 */
static void dma_tasklet(unsigned long data)
{
	struct middma_device *mid = NULL;
	struct intel_mid_dma_chan *midc = NULL;
	u32 status;
	int i;

	mid = (struct middma_device *)data;
	if (mid == NULL) {
		pr_err("ERR_MDMA: tasklet Null param\n");
		return;
	}
	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
	status = ioread32(mid->dma_base + RAW_TFR);
	pr_debug("MDMA:RAW_TFR %x\n", status);
	status &= mid->intr_mask;
	while (status) {
		/*txn interrupt*/
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);
		/*clear this interrupt first*/
		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);

		spin_lock_bh(&midc->lock);
		midc_scan_descriptors(mid, midc);
		pr_debug("MDMA:Scan of desc... complete, unmasking\n");
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_TFR);
		spin_unlock_bh(&midc->lock);
	}

	status = ioread32(mid->dma_base + RAW_ERR);
	status &= mid->intr_mask;
	while (status) {
		/*err interrupt*/
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx error interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);

		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
		spin_lock_bh(&midc->lock);
		midc_handle_error(mid, midc);
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_ERR);
		spin_unlock_bh(&midc->lock);
	}
	pr_debug("MDMA:Exiting tasklet...\n");
	return;
}

static void dma_tasklet1(unsigned long data)
{
	pr_debug("MDMA:in tasklet1...\n");
	return dma_tasklet(data);
}

static void dma_tasklet2(unsigned long data)
{
	pr_debug("MDMA:in tasklet2...\n");
	return dma_tasklet(data);
}

/**
 * intel_mid_dma_interrupt -	DMA ISR
 * @irq: IRQ where the interrupt occurred
 * @data: ISR callback data (the controller structure)
 *
 * See if this is our interrupt; if so, schedule the tasklet,
 * otherwise ignore
 */
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{
	struct middma_device *mid = data;
	u32 status;
	int call_tasklet = 0;

	/*DMA Interrupt*/
	pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
	if (!mid) {
		pr_err("ERR_MDMA:null pointer mid\n");
		return IRQ_NONE;
	}

	status = ioread32(mid->dma_base + RAW_TFR);
	pr_debug("MDMA: Status %x, Mask %x\n", status, mid->intr_mask);
	status &= mid->intr_mask;
	if (status) {
		/*mask these transfer interrupts until the tasklet has run*/
		iowrite32((status << 8), mid->dma_base + MASK_TFR);
		pr_debug("MDMA: Calling tasklet %x\n", status);
		call_tasklet = 1;
	}
	status = ioread32(mid->dma_base + RAW_ERR);
	status &= mid->intr_mask;
	if (status) {
		iowrite32(MASK_INTR_REG(status), mid->dma_base + MASK_ERR);
		call_tasklet = 1;
	}
	if (call_tasklet)
		tasklet_schedule(&mid->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

/**
 * mid_setup_dma -	Setup the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Initialize the DMA controller, channels, registration with the DMA
 * engine, and the ISR. Initialize the DMA controller channels.
 */
static int mid_setup_dma(struct pci_dev *pdev)
{
	struct middma_device *dma = pci_get_drvdata(pdev);
	int err, i;
	unsigned int irq_level;

	/* DMA coherent memory pool for DMA descriptor allocations */
	dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
					sizeof(struct intel_mid_dma_desc),
					32, 0);
	if (!dma->dma_pool) {
		pr_err("ERR_MDMA:pci_pool_create failed\n");
		err = -ENOMEM;
		kfree(dma);
		goto err_dma_pool;
	}

	INIT_LIST_HEAD(&dma->common.channels);
	dma->pci_id = pdev->device;
	if (dma->pimr_mask) {
		dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
					LNW_PERIPHRAL_MASK_SIZE);
		if (dma->mask_reg == NULL) {
			pr_err("ERR_MDMA:Can't map peripheral intr space\n");
			return -ENOMEM;
		}
	} else
		dma->mask_reg = NULL;

	pr_debug("MDMA:Adding %d channels for this controller\n", dma->max_chan);
	/*init CH structures*/
	dma->intr_mask = 0;
	for (i = 0; i < dma->max_chan; i++) {
		struct intel_mid_dma_chan *midch = &dma->ch[i];

		midch->chan.device = &dma->common;
		midch->chan.cookie = 1;
		midch->chan.chan_id = i;
		midch->ch_id = dma->chan_base + i;
		pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);

		midch->dma_base = dma->dma_base;
		midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
		midch->dma = dma;
		dma->intr_mask |= 1 << (dma->chan_base + i);
		spin_lock_init(&midch->lock);

		INIT_LIST_HEAD(&midch->active_list);
		INIT_LIST_HEAD(&midch->queue);
		INIT_LIST_HEAD(&midch->free_list);
		/*mask interrupts*/
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_BLOCK);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_SRC_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_DST_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_ERR);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_TFR);

		disable_dma_interrupt(midch);
		list_add_tail(&midch->chan.device_node, &dma->common.channels);
	}
	pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);

	/*init dma structure*/
	dma_cap_zero(dma->common.cap_mask);
	dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
	dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
	dma->common.dev = &pdev->dev;
	dma->common.chancnt = dma->max_chan;

	dma->common.device_alloc_chan_resources =
					intel_mid_dma_alloc_chan_resources;
	dma->common.device_free_chan_resources =
					intel_mid_dma_free_chan_resources;

	dma->common.device_tx_status = intel_mid_dma_tx_status;
	dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
	dma->common.device_issue_pending = intel_mid_dma_issue_pending;
	dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
	dma->common.device_control = intel_mid_dma_device_control;

	/*enable dma cntrl*/
	iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);

	/*register irq*/
	if (dma->pimr_mask) {
		irq_level = IRQF_SHARED;
		pr_debug("MDMA:Requesting irq shared for DMAC1\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
			irq_level, "INTEL_MID_DMAC1", dma);
		if (err)
			goto err_irq;
	} else {
		dma->intr_mask = 0x03;
		irq_level = 0;
		pr_debug("MDMA:Requesting irq for DMAC2\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
			irq_level, "INTEL_MID_DMAC2", dma);
		if (err)
			goto err_irq;
	}
	/*register device w/ engine*/
	err = dma_async_device_register(&dma->common);
	if (err) {
		pr_err("ERR_MDMA:device_register failed: %d\n", err);
		goto err_engine;
	}
	if (dma->pimr_mask) {
		pr_debug("setting up tasklet1 for DMAC1\n");
		tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
	} else {
		pr_debug("setting up tasklet2 for DMAC2\n");
		tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
	}
	return 0;

err_engine:
	free_irq(pdev->irq, dma);
err_irq:
	pci_pool_destroy(dma->dma_pool);
	kfree(dma);
err_dma_pool:
	pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
	return err;
}

/**
 * middma_shutdown -	Shutdown the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Called by remove
 * Unregister the DMA controller, clear all structures and free the interrupt
 */
static void middma_shutdown(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	dma_async_device_unregister(&device->common);
	pci_pool_destroy(device->dma_pool);
	if (device->mask_reg)
		iounmap(device->mask_reg);
	if (device->dma_base)
		iounmap(device->dma_base);
	free_irq(pdev->irq, device);
	return;
}

/**
 * intel_mid_dma_probe -	PCI Probe
 * @pdev: Controller PCI device structure
 * @id: pci device id structure
 *
 * Initialize the PCI device, map BARs, query driver data.
 * Call setup_dma to complete controller and chan initialization
 */
static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
					const struct pci_device_id *id)
{
	struct middma_device *device;
	u32 base_addr, bar_size;
	struct intel_mid_dma_probe_info *info;
	int err;

	pr_debug("MDMA: probe for %x\n", pdev->device);
	info = (void *)id->driver_data;
	pr_debug("MDMA: CH %d, base %d, block len %d, Peripheral mask %x\n",
				info->max_chan, info->ch_base,
				info->block_size, info->pimr_mask);

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_request_regions(pdev, "intel_mid_dmac");
	if (err)
		goto err_request_regions;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		pr_err("ERR_MDMA:kzalloc failed probe\n");
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pci_dev_get(pdev);

	base_addr = pci_resource_start(pdev, 0);
	bar_size  = pci_resource_len(pdev, 0);
	device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
	if (!device->dma_base) {
		pr_err("ERR_MDMA:ioremap failed\n");
		err = -ENOMEM;
		goto err_ioremap;
	}
	pci_set_drvdata(pdev, device);
	pci_set_master(pdev);
	device->max_chan = info->max_chan;
	device->chan_base = info->ch_base;
	device->block_size = info->block_size;
	device->pimr_mask = info->pimr_mask;

	err = mid_setup_dma(pdev);
	if (err)
		goto err_dma;

	return 0;

err_dma:
	iounmap(device->dma_base);
err_ioremap:
	pci_dev_put(pdev);
	kfree(device);
err_kzalloc:
err_set_dma_mask:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_request_regions:
err_enable_device:
	pr_err("ERR_MDMA:Probe failed %d\n", err);
	return err;
}

/**
 * intel_mid_dma_remove -	PCI remove
 * @pdev: Controller PCI device structure
 *
 * Free up all resources and data
 * Call shutdown_dma to complete controller and chan cleanup
 */
static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);
	middma_shutdown(pdev);
	pci_dev_put(pdev);
	kfree(device);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
1106
1107/******************************************************************************
1108* PCI stuff
1109*/
1110static struct pci_device_id intel_mid_dma_ids[] = {
1111	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),	INFO(2, 6, 4095, 0x200020)},
1112	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),	INFO(2, 0, 2047, 0)},
1113	{ PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),	INFO(2, 0, 2047, 0)},
1114	{ PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID),	INFO(4, 0, 4095, 0x400040)},
1115	{ 0, }
1116};
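
/*
 * For reference, decoding the INFO() entries above: DMAC1 exposes 2
 * channels at hardware channel base 6 with a 4095-item block limit and
 * peripheral-interrupt mask 0x200020; both DMAC2 variants expose 2
 * channels at base 0 with a 2047-item limit and no PIMR; the Medfield
 * (MFLD) DMAC1 exposes 4 channels at base 0 with a 4095-item limit and
 * PIMR mask 0x400040.
 */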
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);

static struct pci_driver intel_mid_dma_pci = {
	.name		=	"Intel MID DMA",
	.id_table	=	intel_mid_dma_ids,
	.probe		=	intel_mid_dma_probe,
	.remove		=	__devexit_p(intel_mid_dma_remove),
};

static int __init intel_mid_dma_init(void)
{
	pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
			INTEL_MID_DMA_DRIVER_VERSION);
	return pci_register_driver(&intel_mid_dma_pci);
}
fs_initcall(intel_mid_dma_init);

static void __exit intel_mid_dma_exit(void)
{
	pci_unregister_driver(&intel_mid_dma_pci);
}
module_exit(intel_mid_dma_exit);

MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);