1/*
2 *  linux/drivers/mmc/imxmmc.c - Motorola i.MX MMCI driver
3 *
4 *  Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de>
5 *  Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
6 *
7 *  derived from pxamci.c by Russell King
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 *  2005-04-17 Pavel Pisa <pisa@cmp.felk.cvut.cz>
14 *             Changed to conform redesigned i.MX scatter gather DMA interface
15 *
16 *  2005-11-04 Pavel Pisa <pisa@cmp.felk.cvut.cz>
17 *             Updated for 2.6.14 kernel
18 *
19 *  2005-12-13 Jay Monkman <jtm@smoothsmoothie.com>
20 *             Found and corrected problems in the write path
21 *
22 *  2005-12-30 Pavel Pisa <pisa@cmp.felk.cvut.cz>
 *             The event handling rewritten the right way, in a softirq.
24 *             Added many ugly hacks and delays to overcome SDHC
25 *             deficiencies
26 *
27 */
28
29#ifdef CONFIG_MMC_DEBUG
30#define DEBUG
31#else
32#undef  DEBUG
33#endif
34
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/ioport.h>
38#include <linux/platform_device.h>
39#include <linux/interrupt.h>
40#include <linux/blkdev.h>
41#include <linux/dma-mapping.h>
42#include <linux/mmc/host.h>
43#include <linux/mmc/card.h>
44#include <linux/delay.h>
45
46#include <asm/dma.h>
47#include <asm/io.h>
48#include <asm/irq.h>
49#include <asm/sizes.h>
50#include <asm/arch/mmc.h>
51#include <asm/arch/imx-dma.h>
52
53#include "imxmmc.h"
54
#define DRIVER_NAME "imx-mmc"

/* Interrupt sources left enabled while the driver is idle (everything masked
 * except card detect / SDIO / DAT0 wake sources is gated per-command). */
#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \
	              INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \
		      INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO)

/* Per-controller driver state, lives in the mmc_host private area. */
struct imxmci_host {
	struct mmc_host		*mmc;
	spinlock_t		lock;		/* guards imask / MMC_INT_MASK updates */
	struct resource		*res;		/* claimed register memory region */
	int			irq;
	imx_dmach_t		dma;		/* DMA channel handle */
	unsigned int		clkrt;
	unsigned int		cmdat;
	volatile unsigned int	imask;		/* shadow of MMC_INT_MASK */
	unsigned int		power_mode;
	unsigned int		present;	/* last observed card-present state */
	struct imxmmc_platform_data *pdata;

	/* Current request state; non-NULL only while a request is in flight */
	struct mmc_request	*req;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	struct timer_list	timer;		/* card-detect poll + stuck watchdog */
	struct tasklet_struct	tasklet;	/* bottom half for IRQ/DMA events */
	unsigned int		status_reg;	/* MMC_STATUS latched at IRQ time */
	unsigned long		pending_events;	/* IMXMCI_PEND_* bit field */
	/* Next two fields are there for CPU driven transfers to overcome SDHC deficiencies */
	u16			*data_ptr;
	unsigned int		data_cnt;
	atomic_t		stuck_timeout;	/* watchdog ticks with no progress */

	unsigned int		dma_nents;
	unsigned int		dma_size;	/* total bytes of current data phase */
	unsigned int		dma_dir;
	int			dma_allocated;	/* DMA channel owned (for cleanup) */

	unsigned char		actual_bus_width;

	int			prev_cmd_code;	/* opcode of last finished command (diagnostics) */
};

/* Bit numbers in host->pending_events (events for the tasklet) */
#define IMXMCI_PEND_IRQ_b	0
#define IMXMCI_PEND_DMA_END_b	1
#define IMXMCI_PEND_DMA_ERR_b	2
#define IMXMCI_PEND_WAIT_RESP_b	3
#define IMXMCI_PEND_DMA_DATA_b	4
#define IMXMCI_PEND_CPU_DATA_b	5
#define IMXMCI_PEND_CARD_XCHG_b	6
#define IMXMCI_PEND_SET_INIT_b	7
#define IMXMCI_PEND_STARTED_b	8

/* Corresponding bit masks */
#define IMXMCI_PEND_IRQ_m	(1 << IMXMCI_PEND_IRQ_b)
#define IMXMCI_PEND_DMA_END_m	(1 << IMXMCI_PEND_DMA_END_b)
#define IMXMCI_PEND_DMA_ERR_m	(1 << IMXMCI_PEND_DMA_ERR_b)
#define IMXMCI_PEND_WAIT_RESP_m	(1 << IMXMCI_PEND_WAIT_RESP_b)
#define IMXMCI_PEND_DMA_DATA_m	(1 << IMXMCI_PEND_DMA_DATA_b)
#define IMXMCI_PEND_CPU_DATA_m	(1 << IMXMCI_PEND_CPU_DATA_b)
#define IMXMCI_PEND_CARD_XCHG_m	(1 << IMXMCI_PEND_CARD_XCHG_b)
#define IMXMCI_PEND_SET_INIT_m	(1 << IMXMCI_PEND_SET_INIT_b)
#define IMXMCI_PEND_STARTED_m	(1 << IMXMCI_PEND_STARTED_b)
116
117static void imxmci_stop_clock(struct imxmci_host *host)
118{
119	int i = 0;
120	MMC_STR_STP_CLK &= ~STR_STP_CLK_START_CLK;
121	while(i < 0x1000) {
122	        if(!(i & 0x7f))
123			MMC_STR_STP_CLK |= STR_STP_CLK_STOP_CLK;
124
125		if(!(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)) {
126			/* Check twice before cut */
127			if(!(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN))
128				return;
129		}
130
131		i++;
132	}
133	dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
134}
135
/*
 * Start the SDHC bus clock and busy-wait until either the controller
 * reports the clock running or the IRQ handler signals the command has
 * started (IMXMCI_PEND_STARTED_b).  Returns 0 on success, -1 if the
 * clock never starts.
 */
static int imxmci_start_clock(struct imxmci_host *host)
{
	unsigned int trials = 0;
	unsigned int delay_limit = 128;
	unsigned long flags;

	MMC_STR_STP_CLK &= ~STR_STP_CLK_STOP_CLK;

	clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);

	/*
	 * Command start of the clock, this usually succeeds in less
	 * then 6 delay loops, but during card detection (low clockrate)
	 * it takes up to 5000 delay loops and sometimes fails for the first time
	 */
	MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;

	do {
		unsigned int delay = delay_limit;

		while(delay--){
			if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)
				/* Check twice before cut */
				if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)
					return 0;

			/* IRQ handler saw END_CMD_RES: the command went out */
			if(test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
				return 0;
		}

		local_irq_save(flags);
		/*
		 * Ensure, that request is not doubled under all possible circumstances.
		 * It is possible, that clock running state is missed, because some other
		 * IRQ or schedule delays this function execution and the clocks has
		 * been already stopped by other means (response processing, SDHC HW)
		 */
		if(!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
			MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;
		local_irq_restore(flags);

	} while(++trials<256);

	dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");

	return -1;
}
183
184static void imxmci_softreset(void)
185{
186	/* reset sequence */
187	MMC_STR_STP_CLK = 0x8;
188	MMC_STR_STP_CLK = 0xD;
189	MMC_STR_STP_CLK = 0x5;
190	MMC_STR_STP_CLK = 0x5;
191	MMC_STR_STP_CLK = 0x5;
192	MMC_STR_STP_CLK = 0x5;
193	MMC_STR_STP_CLK = 0x5;
194	MMC_STR_STP_CLK = 0x5;
195	MMC_STR_STP_CLK = 0x5;
196	MMC_STR_STP_CLK = 0x5;
197
198	MMC_RES_TO = 0xff;
199	MMC_BLK_LEN = 512;
200	MMC_NOB = 1;
201}
202
/*
 * Poll MMC_STATUS (accumulating reads into *pstat) until any bit of
 * stat_mask is set.  'timeout' is in the same ~2us units as the loop
 * below.  Returns 0 if the condition already held, the number of loop
 * units spent waiting otherwise, or -1 on timeout.
 */
static int imxmci_busy_wait_for_status(struct imxmci_host *host,
			unsigned int *pstat, unsigned int stat_mask,
			int timeout, const char *where)
{
	int loops=0;
	while(!(*pstat & stat_mask)) {
		loops+=2;
		if(loops >= timeout) {
			dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n",
				where, *pstat, stat_mask);
			return -1;
		}
		udelay(2);
		/* OR in new status so transient flags are not lost */
		*pstat |= MMC_STATUS;
	}
	if(!loops)
		return 0;

	/* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */
	if(!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock>=8000000))
		dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
			loops, where, *pstat, stat_mask);
	return loops;
}
227
/*
 * Program the controller and the DMA engine for the data phase of a
 * request.  Transfers smaller than 512 bytes are done by the CPU
 * (IMXMCI_PEND_CPU_DATA_b), larger ones via DMA (IMXMCI_PEND_DMA_DATA_b).
 * For DMA reads the channel is enabled here; DMA writes are enabled later,
 * after the command response arrives (see imxmci_cmd_done).
 */
static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned int blksz = data->blksz;
	unsigned int datasz = nob * blksz;
	int i;

	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	host->data = data;
	data->bytes_xfered = 0;

	MMC_NOB = nob;
	MMC_BLK_LEN = blksz;

	/*
	 * DMA cannot be used for small block sizes, we have to use CPU driven transfers otherwise.
	 * We are in big troubles for non-512 byte transfers according to note in the paragraph
	 * 20.6.7 of User Manual anyway, but we need to be able to transfer SCR at least.
	 * The situation is even more complex in reality. The SDHC is not able to handle well
	 * partial FIFO fills and reads. The length has to be rounded up to burst size multiple.
	 * This is required for SCR read at least.
	 */
	if (datasz < 512) {
		host->dma_size = datasz;
		if (data->flags & MMC_DATA_READ) {
			host->dma_dir = DMA_FROM_DEVICE;

			/* Hack to enable read SCR */
			MMC_NOB = 1;
			MMC_BLK_LEN = 512;
		} else {
			host->dma_dir = DMA_TO_DEVICE;
		}

		/* Convert back to virtual address */
		host->data_ptr = (u16*)(page_address(data->sg->page) + data->sg->offset);
		host->data_cnt = 0;

		clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
		set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

		return;
	}

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
						data->sg_len,  host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
			host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_READ);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
	} else {
		host->dma_dir = DMA_TO_DEVICE;

		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
						data->sg_len,  host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
			host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_WRITE);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
	}

	/* Sanity check: the mapped scatterlist must cover the request size */
	host->dma_size = 0;
	for(i=0; i<host->dma_nents; i++)
		host->dma_size+=data->sg[i].length;

	if (datasz > host->dma_size) {
		dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
		       datasz, host->dma_size);
	}

	host->dma_size = datasz;

	wmb();

	if(host->actual_bus_width == MMC_BUS_WIDTH_4)
		BLR(host->dma) = 0;	/* burst 64 byte read / 64 bytes write */
	else
		BLR(host->dma) = 16;	/* burst 16 byte read / 16 bytes write */

	RSSR(host->dma) = DMA_REQ_SDHC;

	set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
	clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

	/* start DMA engine for read, write is delayed after initial response */
	if (host->dma_dir == DMA_FROM_DEVICE) {
		imx_dma_enable(host->dma);
	}
}
325
/*
 * Program a command into the controller and start the clock to send it.
 * 'cmdat' carries pre-computed CMD_DAT_CONT flags (data enable, write,
 * stream) from imxmci_request(); response format, bus width and init
 * flags are added here.  Completion is reported via IRQ -> tasklet.
 */
static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	unsigned long flags;
	u32 imask;

	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	/* Ensure, that clock are stopped else command programming and start fails */
	imxmci_stop_clock(host);

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMD_DAT_CONT_BUSY;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1: /* short CRC, OPCODE */
	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1;
		break;
	case MMC_RSP_R2: /* long 136 bit + CRC */
		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2;
		break;
	case MMC_RSP_R3: /* short */
		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3;
		break;
	default:
		break;
	}

	if ( test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events) )
		cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */

	if ( host->actual_bus_width == MMC_BUS_WIDTH_4 )
		cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;

	/* Program opcode, 32-bit argument (split high/low) and control word */
	MMC_CMD = cmd->opcode;
	MMC_ARGH = cmd->arg >> 16;
	MMC_ARGL = cmd->arg & 0xffff;
	MMC_CMD_DAT_CONT = cmdat;

	atomic_set(&host->stuck_timeout, 0);
	set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events);


	/* Unmask only the interrupts this particular command can produce */
	imask = IMXMCI_INT_MASK_DEFAULT;
	imask &= ~INT_MASK_END_CMD_RES;
	if ( cmdat & CMD_DAT_CONT_DATA_ENABLE ) {
		/*imask &= ~INT_MASK_BUF_READY;*/
		imask &= ~INT_MASK_DATA_TRAN;
		if ( cmdat & CMD_DAT_CONT_WRITE )
			imask &= ~INT_MASK_WRITE_OP_DONE;
		if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
			imask &= ~INT_MASK_BUF_READY;
	}

	spin_lock_irqsave(&host->lock, flags);
	host->imask = imask;
	MMC_INT_MASK = host->imask;
	spin_unlock_irqrestore(&host->lock, flags);

	dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n",
		cmd->opcode, cmd->opcode, imask);

	imxmci_start_clock(host);
}
391
392static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req)
393{
394	unsigned long flags;
395
396	spin_lock_irqsave(&host->lock, flags);
397
398	host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m |
399			IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m);
400
401	host->imask = IMXMCI_INT_MASK_DEFAULT;
402	MMC_INT_MASK = host->imask;
403
404	spin_unlock_irqrestore(&host->lock, flags);
405
406	if(req && req->cmd)
407		host->prev_cmd_code = req->cmd->opcode;
408
409	host->req = NULL;
410	host->cmd = NULL;
411	host->data = NULL;
412	mmc_request_done(host->mmc, req);
413}
414
415static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat)
416{
417	struct mmc_data *data = host->data;
418	int data_error;
419
420	if(test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)){
421		imx_dma_disable(host->dma);
422		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
423			     host->dma_dir);
424	}
425
426	if ( stat & STATUS_ERR_MASK ) {
427		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",stat);
428		if(stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR))
429			data->error = MMC_ERR_BADCRC;
430		else if(stat & STATUS_TIME_OUT_READ)
431			data->error = MMC_ERR_TIMEOUT;
432		else
433			data->error = MMC_ERR_FAILED;
434	} else {
435		data->bytes_xfered = host->dma_size;
436	}
437
438	data_error = data->error;
439
440	host->data = NULL;
441
442	return data_error;
443}
444
445static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
446{
447	struct mmc_command *cmd = host->cmd;
448	int i;
449	u32 a,b,c;
450	struct mmc_data *data = host->data;
451
452	if (!cmd)
453		return 0;
454
455	host->cmd = NULL;
456
457	if (stat & STATUS_TIME_OUT_RESP) {
458		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
459		cmd->error = MMC_ERR_TIMEOUT;
460	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
461		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
462		cmd->error = MMC_ERR_BADCRC;
463	}
464
465	if(cmd->flags & MMC_RSP_PRESENT) {
466		if(cmd->flags & MMC_RSP_136) {
467			for (i = 0; i < 4; i++) {
468				u32 a = MMC_RES_FIFO & 0xffff;
469				u32 b = MMC_RES_FIFO & 0xffff;
470				cmd->resp[i] = a<<16 | b;
471			}
472		} else {
473			a = MMC_RES_FIFO & 0xffff;
474			b = MMC_RES_FIFO & 0xffff;
475			c = MMC_RES_FIFO & 0xffff;
476			cmd->resp[0] = a<<24 | b<<8 | c>>8;
477		}
478	}
479
480	dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n",
481		cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error);
482
483	if (data && (cmd->error == MMC_ERR_NONE) && !(stat & STATUS_ERR_MASK)) {
484		if (host->req->data->flags & MMC_DATA_WRITE) {
485
486			/* Wait for FIFO to be empty before starting DMA write */
487
488			stat = MMC_STATUS;
489			if(imxmci_busy_wait_for_status(host, &stat,
490				STATUS_APPL_BUFF_FE,
491				40, "imxmci_cmd_done DMA WR") < 0) {
492				cmd->error = MMC_ERR_FIFO;
493				imxmci_finish_data(host, stat);
494				if(host->req)
495					imxmci_finish_request(host, host->req);
496				dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n",
497				       stat);
498				return 0;
499			}
500
501			if(test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
502				imx_dma_enable(host->dma);
503			}
504		}
505	} else {
506		struct mmc_request *req;
507		imxmci_stop_clock(host);
508		req = host->req;
509
510		if(data)
511			imxmci_finish_data(host, stat);
512
513		if( req ) {
514			imxmci_finish_request(host, req);
515		} else {
516			dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n");
517		}
518	}
519
520	return 1;
521}
522
523static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
524{
525	struct mmc_data *data = host->data;
526	int data_error;
527
528	if (!data)
529		return 0;
530
531	data_error = imxmci_finish_data(host, stat);
532
533	if (host->req->stop) {
534		imxmci_stop_clock(host);
535		imxmci_start_cmd(host, host->req->stop, 0);
536	} else {
537		struct mmc_request *req;
538		req = host->req;
539		if( req ) {
540			imxmci_finish_request(host, req);
541		} else {
542			dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n");
543		}
544	}
545
546	return 1;
547}
548
/*
 * PIO path for transfers smaller than 512 bytes (the SDHC cannot DMA
 * partial FIFO bursts).  Moves data between host->data_ptr and the
 * controller FIFO one burst at a time.  The udelay() calls work around
 * SDHC flakiness at bus clocks below 8MHz and must not be removed.
 * Updates *pstat with accumulated status; returns 1 when the transfer
 * is complete, -1 on read timeout, 0 when more FIFO events are needed.
 */
static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
{
	int i;
	int burst_len;
	int trans_done = 0;
	unsigned int stat = *pstat;

	/* FIFO burst size follows the configured bus width */
	if(host->actual_bus_width != MMC_BUS_WIDTH_4)
		burst_len = 16;
	else
		burst_len = 64;

	/* This is unfortunately required */
	dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
		stat);

	udelay(20);	/* required for clocks < 8MHz*/

	if(host->dma_dir == DMA_FROM_DEVICE) {
		imxmci_busy_wait_for_status(host, &stat,
				STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE |
				STATUS_TIME_OUT_READ,
				50, "imxmci_cpu_driven_data read");

		/* Drain the FIFO while it reports data; the card always pads
		 * the block to 512 bytes (see the read hack in setup_data) */
		while((stat & (STATUS_APPL_BUFF_FF |  STATUS_DATA_TRANS_DONE)) &&
		      !(stat & STATUS_TIME_OUT_READ) &&
		      (host->data_cnt < 512)) {

			udelay(20);	/* required for clocks < 8MHz*/

			for(i = burst_len; i>=2 ; i-=2) {
				u16 data;
				data = MMC_BUFFER_ACCESS;
				udelay(10);	/* required for clocks < 8MHz*/
				/* Only store bytes the caller asked for; the
				 * rest of the 512-byte block is discarded */
				if(host->data_cnt+2 <= host->dma_size) {
					*(host->data_ptr++) = data;
				} else {
					if(host->data_cnt < host->dma_size)
						*(u8*)(host->data_ptr) = data;
				}
				host->data_cnt += 2;
			}

			stat = MMC_STATUS;

			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
				host->data_cnt, burst_len, stat);
		}

		if((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
			trans_done = 1;

		/* CRC of the padded partial block is meaningless - ignore it */
		if(host->dma_size & 0x1ff)
			stat &= ~STATUS_CRC_READ_ERR;

		if(stat & STATUS_TIME_OUT_READ) {
			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n",
				stat);
			trans_done = -1;
		}

	} else {
		imxmci_busy_wait_for_status(host, &stat,
				STATUS_APPL_BUFF_FE,
				20, "imxmci_cpu_driven_data write");

		/* Refill the FIFO whenever it reports empty, until all
		 * requested bytes are pushed out */
		while((stat & STATUS_APPL_BUFF_FE) &&
		      (host->data_cnt < host->dma_size)) {
			if(burst_len >= host->dma_size - host->data_cnt) {
				burst_len = host->dma_size - host->data_cnt;
				host->data_cnt = host->dma_size;
				trans_done = 1;
			} else {
				host->data_cnt += burst_len;
			}

			for(i = burst_len; i>0 ; i-=2)
				MMC_BUFFER_ACCESS = *(host->data_ptr++);

			stat = MMC_STATUS;

			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n",
				burst_len, stat);
		}
	}

	*pstat = stat;

	return trans_done;
}
639
640static void imxmci_dma_irq(int dma, void *devid)
641{
642	struct imxmci_host *host = devid;
643	uint32_t stat = MMC_STATUS;
644
645	atomic_set(&host->stuck_timeout, 0);
646	host->status_reg = stat;
647	set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
648	tasklet_schedule(&host->tasklet);
649}
650
651static irqreturn_t imxmci_irq(int irq, void *devid)
652{
653	struct imxmci_host *host = devid;
654	uint32_t stat = MMC_STATUS;
655	int handled = 1;
656
657	MMC_INT_MASK = host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT;
658
659	atomic_set(&host->stuck_timeout, 0);
660	host->status_reg = stat;
661	set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
662	set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
663	tasklet_schedule(&host->tasklet);
664
665	return IRQ_RETVAL(handled);;
666}
667
/*
 * Bottom half: processes the events latched in host->pending_events by
 * the IRQ/DMA handlers and the watchdog timer — command responses,
 * CPU-driven FIFO transfers, DMA completion and card insertion/removal.
 * Also detects a stuck controller (stuck_timeout incremented by the
 * poll timer with no IRQ activity) and fails the request with timeout
 * status in that case.
 */
static void imxmci_tasklet_fnc(unsigned long data)
{
	struct imxmci_host *host = (struct imxmci_host *)data;
	u32 stat;
	unsigned int data_dir_mask = 0;	/* STATUS_WR_CRC_ERROR_CODE_MASK */
	int timeout = 0;

	if(atomic_read(&host->stuck_timeout) > 4) {
		char *what;
		timeout = 1;
		stat = MMC_STATUS;
		host->status_reg = stat;
		/* Classify what we were waiting on, for the error report */
		if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
				what = "RESP+DMA";
			else
				what = "RESP";
		else
			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
				if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
					what = "DATA";
				else
					what = "DMA";
			else
				what = "???";

		dev_err(mmc_dev(host->mmc), "%s TIMEOUT, hardware stucked STATUS = 0x%04x IMASK = 0x%04x\n",
		       what, stat, MMC_INT_MASK);
		dev_err(mmc_dev(host->mmc), "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
		       MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma));
		dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
		       host->cmd?host->cmd->opcode:0, host->prev_cmd_code, 1<<host->actual_bus_width, host->dma_size);
	}

	/* Card gone or hardware stuck: fabricate error status to unwind */
	if(!host->present || timeout)
		host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
				    STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;

	if(test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
		clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);

		stat = MMC_STATUS;
		/*
		 * This is not required in theory, but there is chance to miss some flag
		 * which clears automatically by mask write, FreeScale original code keeps
		 * stat from IRQ time so do I
		 */
		stat |= host->status_reg;

		if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
			stat &= ~STATUS_CRC_READ_ERR;

		if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
			imxmci_busy_wait_for_status(host, &stat,
					STATUS_END_CMD_RESP | STATUS_ERR_MASK,
					20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
		}

		if(stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
			if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
				imxmci_cmd_done(host, stat);
			if(host->data && (stat & STATUS_ERR_MASK))
				imxmci_data_done(host, stat);
		}

		if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
			stat |= MMC_STATUS;
			/* Nonzero means the PIO transfer finished (or failed) */
			if(imxmci_cpu_driven_data(host, &stat)){
				if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
					imxmci_cmd_done(host, stat);
				atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m,
							&host->pending_events);
				imxmci_data_done(host, stat);
			}
		}
	}

	if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
	   !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {

		stat = MMC_STATUS;
		/* Same as above */
		stat |= host->status_reg;

		if(host->dma_dir == DMA_TO_DEVICE) {
			data_dir_mask = STATUS_WRITE_OP_DONE;
		} else {
			data_dir_mask = STATUS_DATA_TRANS_DONE;
		}

		if(stat & data_dir_mask) {
			clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
			imxmci_data_done(host, stat);
		}
	}

	/* Card inserted/removed: abort whatever was running and rescan */
	if(test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {

		if(host->cmd)
			imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);

		if(host->data)
			imxmci_data_done(host, STATUS_TIME_OUT_READ |
					 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);

		if(host->req)
			imxmci_finish_request(host, host->req);

		mmc_detect_change(host->mmc, msecs_to_jiffies(100));

	}
}
780
781static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req)
782{
783	struct imxmci_host *host = mmc_priv(mmc);
784	unsigned int cmdat;
785
786	WARN_ON(host->req != NULL);
787
788	host->req = req;
789
790	cmdat = 0;
791
792	if (req->data) {
793		imxmci_setup_data(host, req->data);
794
795		cmdat |= CMD_DAT_CONT_DATA_ENABLE;
796
797		if (req->data->flags & MMC_DATA_WRITE)
798			cmdat |= CMD_DAT_CONT_WRITE;
799
800		if (req->data->flags & MMC_DATA_STREAM) {
801			cmdat |= CMD_DAT_CONT_STREAM_BLOCK;
802		}
803	}
804
805	imxmci_start_cmd(host, req->cmd, cmdat);
806}
807
/* Target input rate to the SDHC clock divider (19.2 MHz) */
#define CLK_RATE 19200000

/*
 * mmc_host_ops.set_ios: apply bus width, power mode and clock settings
 * requested by the MMC core.  A MMC_POWER_UP transition arms the
 * 80-clock init sequence for the next command (IMXMCI_PEND_SET_INIT_b).
 */
static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct imxmci_host *host = mmc_priv(mmc);
	int prescaler;

	if( ios->bus_width==MMC_BUS_WIDTH_4 ) {
		host->actual_bus_width = MMC_BUS_WIDTH_4;
		imx_gpio_mode(PB11_PF_SD_DAT3);
	}else{
		host->actual_bus_width = MMC_BUS_WIDTH_1;
		/* DAT3 as pulled-up GPIO input keeps the card in MMC mode */
		imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
	}

	if ( host->power_mode != ios->power_mode ) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
        		break;
		case MMC_POWER_UP:
			set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
        		break;
		case MMC_POWER_ON:
        		break;
		}
		host->power_mode = ios->power_mode;
	}

	if ( ios->clock ) {
		unsigned int clk;

		/* The prescaler is 5 for PERCLK2 equal to 96MHz
		 * then 96MHz / 5 = 19.2 MHz
		 */
		clk=imx_get_perclk2();
		/* Round up so the result never exceeds CLK_RATE */
		prescaler=(clk+(CLK_RATE*7)/8)/CLK_RATE;
		switch(prescaler) {
		case 0:
		case 1:	prescaler = 0;
			break;
		case 2:	prescaler = 1;
			break;
		case 3:	prescaler = 2;
			break;
		case 4:	prescaler = 4;
			break;
		default:
		case 5:	prescaler = 5;
			break;
		}

		dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n",
			clk, prescaler);

		/* Find the smallest power-of-two divider not exceeding the
		 * requested frequency ('clk' is reused as the divider exponent) */
		for(clk=0; clk<8; clk++) {
			int x;
			x = CLK_RATE / (1<<clk);
			if( x <= ios->clock)
				break;
		}

		MMC_STR_STP_CLK |= STR_STP_CLK_ENABLE; /* enable controller */

		imxmci_stop_clock(host);
		MMC_CLK_RATE = (prescaler<<3) | clk;
		/*
		 * Under my understanding, clock should not be started there, because it would
		 * initiate SDHC sequencer and send last or random command into card
		 */
		/*imxmci_start_clock(host);*/

		dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE);
	} else {
		imxmci_stop_clock(host);
	}
}
884
/* Host operations handed to the MMC core (no get_ro/get_cd callbacks;
 * card detect is polled via the timer instead). */
static const struct mmc_host_ops imxmci_ops = {
	.request	= imxmci_request,
	.set_ios	= imxmci_set_ios,
};
889
890static struct resource *platform_device_resource(struct platform_device *dev, unsigned int mask, int nr)
891{
892	int i;
893
894	for (i = 0; i < dev->num_resources; i++)
895		if (dev->resource[i].flags == mask && nr-- == 0)
896			return &dev->resource[i];
897	return NULL;
898}
899
900static int platform_device_irq(struct platform_device *dev, int nr)
901{
902	int i;
903
904	for (i = 0; i < dev->num_resources; i++)
905		if (dev->resource[i].flags == IORESOURCE_IRQ && nr-- == 0)
906			return dev->resource[i].start;
907	return NO_IRQ;
908}
909
910static void imxmci_check_status(unsigned long data)
911{
912	struct imxmci_host *host = (struct imxmci_host *)data;
913
914	if( host->pdata->card_present() != host->present ) {
915		host->present ^= 1;
916		dev_info(mmc_dev(host->mmc), "card %s\n",
917		      host->present ? "inserted" : "removed");
918
919		set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events);
920		tasklet_schedule(&host->tasklet);
921	}
922
923	if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) ||
924	   test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
925		atomic_inc(&host->stuck_timeout);
926		if(atomic_read(&host->stuck_timeout) > 4)
927			tasklet_schedule(&host->tasklet);
928	} else {
929		atomic_set(&host->stuck_timeout, 0);
930
931	}
932
933	mod_timer(&host->timer, jiffies + (HZ>>1));
934}
935
936static int imxmci_probe(struct platform_device *pdev)
937{
938	struct mmc_host *mmc;
939	struct imxmci_host *host = NULL;
940	struct resource *r;
941	int ret = 0, irq;
942
943	printk(KERN_INFO "i.MX mmc driver\n");
944
945	r = platform_device_resource(pdev, IORESOURCE_MEM, 0);
946	irq = platform_device_irq(pdev, 0);
947	if (!r || irq == NO_IRQ)
948		return -ENXIO;
949
950	r = request_mem_region(r->start, 0x100, "IMXMCI");
951	if (!r)
952		return -EBUSY;
953
954	mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
955	if (!mmc) {
956		ret = -ENOMEM;
957		goto out;
958	}
959
960	mmc->ops = &imxmci_ops;
961	mmc->f_min = 150000;
962	mmc->f_max = CLK_RATE/2;
963	mmc->ocr_avail = MMC_VDD_32_33;
964	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_BYTEBLOCK;
965
966	/* MMC core transfer sizes tunable parameters */
967	mmc->max_hw_segs = 64;
968	mmc->max_phys_segs = 64;
969	mmc->max_seg_size = 64*512;	/* default PAGE_CACHE_SIZE */
970	mmc->max_req_size = 64*512;	/* default PAGE_CACHE_SIZE */
971	mmc->max_blk_size = 2048;
972	mmc->max_blk_count = 65535;
973
974	host = mmc_priv(mmc);
975	host->mmc = mmc;
976	host->dma_allocated = 0;
977	host->pdata = pdev->dev.platform_data;
978
979	spin_lock_init(&host->lock);
980	host->res = r;
981	host->irq = irq;
982
983	imx_gpio_mode(PB8_PF_SD_DAT0);
984	imx_gpio_mode(PB9_PF_SD_DAT1);
985	imx_gpio_mode(PB10_PF_SD_DAT2);
986	/* Configured as GPIO with pull-up to ensure right MCC card mode */
987	/* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */
988	imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
989	/* imx_gpio_mode(PB11_PF_SD_DAT3); */
990	imx_gpio_mode(PB12_PF_SD_CLK);
991	imx_gpio_mode(PB13_PF_SD_CMD);
992
993	imxmci_softreset();
994
995	if ( MMC_REV_NO != 0x390 ) {
996		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
997		        MMC_REV_NO);
998		goto out;
999	}
1000
1001	MMC_READ_TO = 0x2db4; /* recommended in data sheet */
1002
1003	host->imask = IMXMCI_INT_MASK_DEFAULT;
1004	MMC_INT_MASK = host->imask;
1005
1006
1007	if(imx_dma_request_by_prio(&host->dma, DRIVER_NAME, DMA_PRIO_LOW)<0){
1008		dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
1009		ret = -EBUSY;
1010		goto out;
1011	}
1012	host->dma_allocated=1;
1013	imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host);
1014
1015	tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host);
1016	host->status_reg=0;
1017	host->pending_events=0;
1018
1019	ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host);
1020	if (ret)
1021		goto out;
1022
1023	host->present = host->pdata->card_present();
1024	init_timer(&host->timer);
1025	host->timer.data = (unsigned long)host;
1026	host->timer.function = imxmci_check_status;
1027	add_timer(&host->timer);
1028	mod_timer(&host->timer, jiffies + (HZ>>1));
1029
1030	platform_set_drvdata(pdev, mmc);
1031
1032	mmc_add_host(mmc);
1033
1034	return 0;
1035
1036out:
1037	if (host) {
1038		if(host->dma_allocated){
1039			imx_dma_free(host->dma);
1040			host->dma_allocated=0;
1041		}
1042	}
1043	if (mmc)
1044		mmc_free_host(mmc);
1045	release_resource(r);
1046	return ret;
1047}
1048
1049static int imxmci_remove(struct platform_device *pdev)
1050{
1051	struct mmc_host *mmc = platform_get_drvdata(pdev);
1052
1053	platform_set_drvdata(pdev, NULL);
1054
1055	if (mmc) {
1056		struct imxmci_host *host = mmc_priv(mmc);
1057
1058		tasklet_disable(&host->tasklet);
1059
1060		del_timer_sync(&host->timer);
1061		mmc_remove_host(mmc);
1062
1063		free_irq(host->irq, host);
1064		if(host->dma_allocated){
1065			imx_dma_free(host->dma);
1066			host->dma_allocated=0;
1067		}
1068
1069		tasklet_kill(&host->tasklet);
1070
1071		release_resource(host->res);
1072
1073		mmc_free_host(mmc);
1074	}
1075	return 0;
1076}
1077
1078#ifdef CONFIG_PM
1079static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
1080{
1081	struct mmc_host *mmc = platform_get_drvdata(dev);
1082	int ret = 0;
1083
1084	if (mmc)
1085		ret = mmc_suspend_host(mmc, state);
1086
1087	return ret;
1088}
1089
1090static int imxmci_resume(struct platform_device *dev)
1091{
1092	struct mmc_host *mmc = platform_get_drvdata(dev);
1093	struct imxmci_host *host;
1094	int ret = 0;
1095
1096	if (mmc) {
1097		host = mmc_priv(mmc);
1098		if(host)
1099			set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
1100		ret = mmc_resume_host(mmc);
1101	}
1102
1103	return ret;
1104}
1105#else
1106#define imxmci_suspend  NULL
1107#define imxmci_resume   NULL
1108#endif /* CONFIG_PM */
1109
/* Platform driver glue; matched by name against the "imx-mmc" platform
 * device registered by the board code. */
static struct platform_driver imxmci_driver = {
	.probe		= imxmci_probe,
	.remove		= imxmci_remove,
	.suspend	= imxmci_suspend,
	.resume		= imxmci_resume,
	.driver		= {
		.name		= DRIVER_NAME,
	}
};
1119
/* Module init: register the platform driver */
static int __init imxmci_init(void)
{
	return platform_driver_register(&imxmci_driver);
}
1124
/* Module exit: unregister the platform driver */
static void __exit imxmci_exit(void)
{
	platform_driver_unregister(&imxmci_driver);
}
1129
1130module_init(imxmci_init);
1131module_exit(imxmci_exit);
1132
1133MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
1134MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1135MODULE_LICENSE("GPL");
1136