/*
 * linux/drivers/mmc/au1xmmc.c - AU1XX0 MMC driver
 *
 *  Copyright (c) 2005, Advanced Micro Devices, Inc.
 *
 *  Developed with help from the 2.4.30 MMC AU1XXX controller including
 *  the following copyright notices:
 *     Copyright (c) 2003-2004 Embedded Edge, LLC.
 *     Portions Copyright (C) 2002 Embedix, Inc
 *     Copyright 2002 Hewlett-Packard Company
 *
 *  2.6 version of this driver inspired by:
 *     (drivers/mmc/wbsd.c) Copyright (C) 2004-2005 Pierre Ossman,
 *     All Rights Reserved.
 *     (drivers/mmc/pxa.c) Copyright (C) 2003 Russell King,
 *     All Rights Reserved.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* Why is a timer used to detect insert events?
 *
 * From the AU1100 MMC application guide:
 * If the Au1100-based design is intended to support both MultiMediaCards
 * and 1- or 4-data bit SecureDigital cards, then the solution is to
 * connect a weak (560KOhm) pull-up resistor to connector pin 1.
 * In doing so, an MMC card never enters SPI-mode communications,
 * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective
 * (the low to high transition will not occur).
 *
 * So we use the timer to check the status manually.
 */
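/* The polling itself is done by au1xmmc_poll_event() below: the timer is
 * armed in au1xmmc_probe() and re-armed every AU1XMMC_DETECT_TIMEOUT
 * jiffies to compare the BCSR insert status against the host state.
 */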

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/host.h>
#include <asm/io.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1100_mmc.h>
#include <asm/scatterlist.h>

#include <au1xxx.h>
#include "au1xmmc.h"

#define DRIVER_NAME "au1xxx-mmc"

/* Set this to enable special debugging macros */

#ifdef DEBUG
#define DBG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args)
#else
#define DBG(fmt, idx, args...)
#endif
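/* DBG() prefixes every message with the controller index, so callers pass
 * host->id first and then the arguments for their own format string, e.g.:
 *     DBG("Unhandled status %8.8x\n", host->id, status);
 */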

const struct {
	u32 iobase;
	u32 tx_devid, rx_devid;
	u16 bcsrpwr;
	u16 bcsrstatus;
	u16 wpstatus;
} au1xmmc_card_table[] = {
	{ SD0_BASE, DSCR_CMD0_SDMS_TX0, DSCR_CMD0_SDMS_RX0,
	  BCSR_BOARD_SD0PWR, BCSR_INT_SD0INSERT, BCSR_STATUS_SD0WP },
#ifndef CONFIG_MIPS_DB1200
	{ SD1_BASE, DSCR_CMD0_SDMS_TX1, DSCR_CMD0_SDMS_RX1,
	  BCSR_BOARD_DS1PWR, BCSR_INT_SD1INSERT, BCSR_STATUS_SD1WP }
#endif
};

#define AU1XMMC_CONTROLLER_COUNT (ARRAY_SIZE(au1xmmc_card_table))

/* This array stores pointers for the hosts (used by the IRQ handler) */
struct au1xmmc_host *au1xmmc_hosts[AU1XMMC_CONTROLLER_COUNT];
static int dma = 1;

#ifdef MODULE
module_param(dma, bool, 0);
MODULE_PARM_DESC(dma, "Use DMA engine for data transfers (0 = disabled)");
#endif

static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
{
	u32 val = au_readl(HOST_CONFIG(host));
	val |= mask;
	au_writel(val, HOST_CONFIG(host));
	au_sync();
}

static inline void FLUSH_FIFO(struct au1xmmc_host *host)
{
	u32 val = au_readl(HOST_CONFIG2(host));

	au_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host));
	au_sync_delay(1);

	/* SEND_STOP will turn off clock control - this re-enables it */
	val &= ~SD_CONFIG2_DF;

	au_writel(val, HOST_CONFIG2(host));
	au_sync();
}

static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
{
	u32 val = au_readl(HOST_CONFIG(host));
	val &= ~mask;
	au_writel(val, HOST_CONFIG(host));
	au_sync();
}

static inline void SEND_STOP(struct au1xmmc_host *host)
{

	/* We know the value of CONFIG2, so avoid a read we don't need */
	u32 mask = SD_CONFIG2_EN;

	WARN_ON(host->status != HOST_S_DATA);
	host->status = HOST_S_STOP;

	au_writel(mask | SD_CONFIG2_DF, HOST_CONFIG2(host));
	au_sync();

	/* Send the stop command */
	au_writel(STOP_CMD, HOST_CMD(host));
}

static void au1xmmc_set_power(struct au1xmmc_host *host, int state)
{

	u32 val = au1xmmc_card_table[host->id].bcsrpwr;

	bcsr->board &= ~val;
	if (state) bcsr->board |= val;

	au_sync_delay(1);
}

static inline int au1xmmc_card_inserted(struct au1xmmc_host *host)
{
	return (bcsr->sig_status & au1xmmc_card_table[host->id].bcsrstatus)
		? 1 : 0;
}

static int au1xmmc_card_readonly(struct mmc_host *mmc)
{
	struct au1xmmc_host *host = mmc_priv(mmc);
	return (bcsr->status & au1xmmc_card_table[host->id].wpstatus)
		? 1 : 0;
}

static void au1xmmc_finish_request(struct au1xmmc_host *host)
{

	struct mmc_request *mrq = host->mrq;

	host->mrq = NULL;
	host->flags &= HOST_F_ACTIVE;

	host->dma.len = 0;
	host->dma.dir = 0;

	host->pio.index  = 0;
	host->pio.offset = 0;
	host->pio.len = 0;

	host->status = HOST_S_IDLE;

	bcsr->disk_leds |= (1 << 8);

	mmc_request_done(host->mmc, mrq);
}

static void au1xmmc_tasklet_finish(unsigned long param)
{
	struct au1xmmc_host *host = (struct au1xmmc_host *) param;
	au1xmmc_finish_request(host);
}

static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
				struct mmc_command *cmd, unsigned int flags)
{
	u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		break;
	case MMC_RSP_R1:
		mmccmd |= SD_CMD_RT_1;
		break;
	case MMC_RSP_R1B:
		mmccmd |= SD_CMD_RT_1B;
		break;
	case MMC_RSP_R2:
		mmccmd |= SD_CMD_RT_2;
		break;
	case MMC_RSP_R3:
		mmccmd |= SD_CMD_RT_3;
		break;
	default:
		printk(KERN_INFO "au1xmmc: unhandled response type %02x\n",
			mmc_resp_type(cmd));
		return MMC_ERR_INVALID;
	}

	if (flags & MMC_DATA_READ) {
		if (flags & MMC_DATA_MULTI)
			mmccmd |= SD_CMD_CT_4;
		else
			mmccmd |= SD_CMD_CT_2;
	} else if (flags & MMC_DATA_WRITE) {
		if (flags & MMC_DATA_MULTI)
			mmccmd |= SD_CMD_CT_3;
		else
			mmccmd |= SD_CMD_CT_1;
	}

	au_writel(cmd->arg, HOST_CMDARG(host));
	au_sync();

	if (wait)
		IRQ_OFF(host, SD_CONFIG_CR);

	au_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
	au_sync();

	/* Wait for the command to go on the line */

	while(1) {
		if (!(au_readl(HOST_CMD(host)) & SD_CMD_GO))
			break;
	}

	/* Wait for the command to come back */

	if (wait) {
		u32 status = au_readl(HOST_STATUS(host));

		while(!(status & SD_STATUS_CR))
			status = au_readl(HOST_STATUS(host));

		/* Clear the CR status */
		au_writel(SD_STATUS_CR, HOST_STATUS(host));

		IRQ_ON(host, SD_CONFIG_CR);
	}

	return MMC_ERR_NONE;
}

static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
{

	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data;
	u32 crc;

	WARN_ON(host->status != HOST_S_DATA && host->status != HOST_S_STOP);

	if (host->mrq == NULL)
		return;

	data = mrq->cmd->data;

	if (status == 0)
		status = au_readl(HOST_STATUS(host));

	/* The transaction is really over when the SD_STATUS_DB bit is clear */

	while((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
		status = au_readl(HOST_STATUS(host));

	data->error = MMC_ERR_NONE;
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);

	/* Process any errors */

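	/* WC/RC are the controller's own write/read CRC error flags.  For
	 * transmits the low three status bits are checked as well: 0x02
	 * matches the SD "data accepted" CRC token, so any other value is
	 * also treated as a CRC failure.
	 */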
	crc = (status & (SD_STATUS_WC | SD_STATUS_RC));
	if (host->flags & HOST_F_XMIT)
		crc |= ((status & 0x07) == 0x02) ? 0 : 1;

	if (crc)
		data->error = MMC_ERR_BADCRC;

	/* Clear the CRC bits */
	au_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host));

	data->bytes_xfered = 0;

	if (data->error == MMC_ERR_NONE) {
		if (host->flags & HOST_F_DMA) {
			u32 chan = DMA_CHANNEL(host);

			chan_tab_t *c = *((chan_tab_t **) chan);
			au1x_dma_chan_t *cp = c->chan_ptr;
			data->bytes_xfered = cp->ddma_bytecnt;
		}
		else
			data->bytes_xfered =
				(data->blocks * data->blksz) -
				host->pio.len;
	}

	au1xmmc_finish_request(host);
}

static void au1xmmc_tasklet_data(unsigned long param)
{
	struct au1xmmc_host *host = (struct au1xmmc_host *) param;

	u32 status = au_readl(HOST_STATUS(host));
	au1xmmc_data_complete(host, status);
}

#define AU1XMMC_MAX_TRANSFER 8
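
/* Each TX/RX data interrupt moves at most this many bytes by PIO before the
 * handler returns and waits for the FIFO to drain (TH) or refill (NE).
 */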

static void au1xmmc_send_pio(struct au1xmmc_host *host)
{

	struct mmc_data *data = 0;
	int sg_len, max, count = 0;
	unsigned char *sg_ptr;
	u32 status = 0;
	struct scatterlist *sg;

	data = host->mrq->data;

	if (!(host->flags & HOST_F_XMIT))
		return;

	/* This is the pointer to the data buffer */
	sg = &data->sg[host->pio.index];
	sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset;

	/* This is the space left inside the buffer */
	sg_len = data->sg[host->pio.index].length - host->pio.offset;

	/* Check whether we need less than the size of the sg_buffer */

	max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
	if (max > AU1XMMC_MAX_TRANSFER) max = AU1XMMC_MAX_TRANSFER;

	for(count = 0; count < max; count++ ) {
		unsigned char val;

		status = au_readl(HOST_STATUS(host));

		if (!(status & SD_STATUS_TH))
			break;

		val = *sg_ptr++;

		au_writel((unsigned long) val, HOST_TXPORT(host));
		au_sync();
	}

	host->pio.len -= count;
	host->pio.offset += count;

	if (count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}

	if (host->pio.len == 0) {
		IRQ_OFF(host, SD_CONFIG_TH);

		if (host->flags & HOST_F_STOP)
			SEND_STOP(host);

		tasklet_schedule(&host->data_task);
	}
}

static void au1xmmc_receive_pio(struct au1xmmc_host *host)
{

	struct mmc_data *data = 0;
	int sg_len = 0, max = 0, count = 0;
	unsigned char *sg_ptr = 0;
	u32 status = 0;
	struct scatterlist *sg;

	data = host->mrq->data;

	if (!(host->flags & HOST_F_RECV))
		return;

	max = host->pio.len;

	if (host->pio.index < host->dma.len) {
		sg = &data->sg[host->pio.index];
		sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset;

		/* This is the space left inside the buffer */
		sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;

		/* Check whether we need less than the size of the sg_buffer */
		if (sg_len < max) max = sg_len;
	}

	if (max > AU1XMMC_MAX_TRANSFER)
		max = AU1XMMC_MAX_TRANSFER;

	for(count = 0; count < max; count++ ) {
		u32 val;
		status = au_readl(HOST_STATUS(host));

		if (!(status & SD_STATUS_NE))
			break;

		if (status & SD_STATUS_RC) {
			DBG("RX CRC Error [%d + %d].\n", host->id,
					host->pio.len, count);
			break;
		}

		if (status & SD_STATUS_RO) {
			DBG("RX Overrun [%d + %d]\n", host->id,
					host->pio.len, count);
			break;
		}
		else if (status & SD_STATUS_RU) {
			DBG("RX Underrun [%d + %d]\n", host->id,
					host->pio.len, count);
			break;
		}

		val = au_readl(HOST_RXPORT(host));

		if (sg_ptr)
			*sg_ptr++ = (unsigned char) (val & 0xFF);
	}

	host->pio.len -= count;
	host->pio.offset += count;

	if (sg_len && count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}

	if (host->pio.len == 0) {
		//IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF);
		IRQ_OFF(host, SD_CONFIG_NE);

		if (host->flags & HOST_F_STOP)
			SEND_STOP(host);

		tasklet_schedule(&host->data_task);
	}
}

/* static void au1xmmc_cmd_complete
   This is called when a command has been completed - grab the response
   and check for errors.  Then start the data transfer if it is indicated.
*/

static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
{

	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd;
	int trans;

	if (!host->mrq)
		return;

	cmd = mrq->cmd;
	cmd->error = MMC_ERR_NONE;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			u32 r[4];
			int i;

			r[0] = au_readl(host->iobase + SD_RESP3);
			r[1] = au_readl(host->iobase + SD_RESP2);
			r[2] = au_readl(host->iobase + SD_RESP1);
			r[3] = au_readl(host->iobase + SD_RESP0);

			/* The CRC is omitted from the response, so really
			 * we only got 120 bits, but the engine expects
			 * 128 bits, so we have to shift things up
			 */
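			/* For example, if SD_RESP3 reads 0x00AABBCC and
			 * SD_RESP2 reads 0xDD445566, resp[0] ends up as
			 * 0xAABBCCDD: the low 24 bits of each word move up
			 * by 8 and the top byte of the next word fills in.
			 */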

			for(i = 0; i < 4; i++) {
				cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
				if (i != 3)
					cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
			}
		} else {
			/* Technically, we should be getting all 48 bits of
			 * the response (SD_RESP1 + SD_RESP2), but because
			 * our response omits the CRC, our data ends up
			 * being shifted 8 bits to the right.  In this case,
			 * that means that the OSR data starts at bit 31,
			 * so we can just read RESP0 and return that
			 */
			cmd->resp[0] = au_readl(host->iobase + SD_RESP0);
		}
	}

	/* Figure out errors */

	if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC))
		cmd->error = MMC_ERR_BADCRC;

	trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);

	if (!trans || cmd->error != MMC_ERR_NONE) {

		IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
		tasklet_schedule(&host->finish_task);
		return;
	}

	host->status = HOST_S_DATA;

	if (host->flags & HOST_F_DMA) {
		u32 channel = DMA_CHANNEL(host);

		/* Start the DMA as soon as the buffer gets something in it */

		if (host->flags & HOST_F_RECV) {
			u32 mask = SD_STATUS_DB | SD_STATUS_NE;

			while((status & mask) != mask)
				status = au_readl(HOST_STATUS(host));
		}

		au1xxx_dbdma_start(channel);
	}
}

static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
{

	unsigned int pbus = get_au1x00_speed();
	unsigned int divisor;
	u32 config;

	/* From databook:
	   divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1
	*/
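	/* For example, assuming a 396 MHz core clock with the SYS_POWERCTRL
	 * divider bits at 0 (sbus divisor of 2), pbus works out to
	 * 396 / 2 / 2 = 99 MHz, and a 24 MHz target rate gives
	 * divisor = ((99 / 24) / 2) - 1 = 1 with integer math.
	 */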

	pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2);
	pbus /= 2;

	divisor = ((pbus / rate) / 2) - 1;

	config = au_readl(HOST_CONFIG(host));

	config &= ~(SD_CONFIG_DIV);
	config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE;

	au_writel(config, HOST_CONFIG(host));
	au_sync();
}

static int
au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
{

	int datalen = data->blocks * data->blksz;

	if (dma != 0)
		host->flags |= HOST_F_DMA;

	if (data->flags & MMC_DATA_READ)
		host->flags |= HOST_F_RECV;
	else
		host->flags |= HOST_F_XMIT;

	if (host->mrq->stop)
		host->flags |= HOST_F_STOP;

	host->dma.dir = DMA_BIDIRECTIONAL;

	host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg,
				   data->sg_len, host->dma.dir);

	if (host->dma.len == 0)
		return MMC_ERR_TIMEOUT;

	au_writel(data->blksz - 1, HOST_BLKSIZE(host));

	if (host->flags & HOST_F_DMA) {
		int i;
		u32 channel = DMA_CHANNEL(host);

		au1xxx_dbdma_stop(channel);

		for(i = 0; i < host->dma.len; i++) {
			u32 ret = 0, flags = DDMA_FLAGS_NOIE;
			struct scatterlist *sg = &data->sg[i];
			int sg_len = sg->length;

			int len = (datalen > sg_len) ? sg_len : datalen;

			if (i == host->dma.len - 1)
				flags = DDMA_FLAGS_IE;

			if (host->flags & HOST_F_XMIT) {
				ret = au1xxx_dbdma_put_source_flags(channel,
					(void *) (page_address(sg->page) +
						  sg->offset),
					len, flags);
			}
			else {
				ret = au1xxx_dbdma_put_dest_flags(channel,
					(void *) (page_address(sg->page) +
						  sg->offset),
					len, flags);
			}

			if (!ret)
				goto dataerr;

			datalen -= len;
		}
	}
	else {
		host->pio.index = 0;
		host->pio.offset = 0;
		host->pio.len = datalen;

		if (host->flags & HOST_F_XMIT)
			IRQ_ON(host, SD_CONFIG_TH);
		else
			IRQ_ON(host, SD_CONFIG_NE);
			//IRQ_ON(host, SD_CONFIG_RA|SD_CONFIG_RF);
	}

	return MMC_ERR_NONE;

 dataerr:
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);
	return MMC_ERR_TIMEOUT;
}

/* static void au1xmmc_request
   This actually starts a command or data transaction
*/

static void au1xmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{

	struct au1xmmc_host *host = mmc_priv(mmc);
	unsigned int flags = 0;
	int ret = MMC_ERR_NONE;

	WARN_ON(irqs_disabled());
	WARN_ON(host->status != HOST_S_IDLE);

	host->mrq = mrq;
	host->status = HOST_S_CMD;

	bcsr->disk_leds &= ~(1 << 8);

	if (mrq->data) {
		FLUSH_FIFO(host);
		flags = mrq->data->flags;
		ret = au1xmmc_prepare_data(host, mrq->data);
	}

	if (ret == MMC_ERR_NONE)
		ret = au1xmmc_send_command(host, 0, mrq->cmd, flags);

	if (ret != MMC_ERR_NONE) {
		mrq->cmd->error = ret;
		au1xmmc_finish_request(host);
	}
}

static void au1xmmc_reset_controller(struct au1xmmc_host *host)
{

	/* Apply the clock */
	au_writel(SD_ENABLE_CE, HOST_ENABLE(host));
	au_sync_delay(1);

	au_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host));
	au_sync_delay(5);

	au_writel(~0, HOST_STATUS(host));
	au_sync();

	au_writel(0, HOST_BLKSIZE(host));
	au_writel(0x001fffff, HOST_TIMEOUT(host));
	au_sync();

	au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
	au_sync();

	au_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host));
	au_sync_delay(1);

	au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
	au_sync();

	/* Configure interrupts */
	au_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host));
	au_sync();
}

static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct au1xmmc_host *host = mmc_priv(mmc);

	if (ios->power_mode == MMC_POWER_OFF)
		au1xmmc_set_power(host, 0);
	else if (ios->power_mode == MMC_POWER_ON) {
		au1xmmc_set_power(host, 1);
	}

	if (ios->clock && ios->clock != host->clock) {
		au1xmmc_set_clock(host, ios->clock);
		host->clock = ios->clock;
	}
}

static void au1xmmc_dma_callback(int irq, void *dev_id)
{
	struct au1xmmc_host *host = (struct au1xmmc_host *) dev_id;

	/* Avoid spurious interrupts */

	if (!host->mrq)
		return;

	if (host->flags & HOST_F_STOP)
		SEND_STOP(host);

	tasklet_schedule(&host->data_task);
}

#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT)
#define STATUS_DATA_IN  (SD_STATUS_NE)
#define STATUS_DATA_OUT (SD_STATUS_TH)
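
/* All the controllers share a single interrupt line, so the handler walks
 * every host: response/data timeouts are checked first, then command
 * completion, then (in PIO mode) FIFO service, and anything left over is
 * reported as an unhandled status.
 */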

static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
{

	u32 status;
	int i, ret = 0;

	disable_irq(AU1100_SD_IRQ);

	for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) {
		struct au1xmmc_host *host = au1xmmc_hosts[i];
		u32 handled = 1;

		status = au_readl(HOST_STATUS(host));

		if (host->mrq && (status & STATUS_TIMEOUT)) {
			if (status & SD_STATUS_RAT)
				host->mrq->cmd->error = MMC_ERR_TIMEOUT;

			else if (status & SD_STATUS_DT)
				host->mrq->data->error = MMC_ERR_TIMEOUT;

			/* In PIO mode, interrupts might still be enabled */
			IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);

			//IRQ_OFF(host, SD_CONFIG_TH|SD_CONFIG_RA|SD_CONFIG_RF);
			tasklet_schedule(&host->finish_task);
		}
		else if (status & (SD_STATUS_CR)) {
			if (host->status == HOST_S_CMD)
				au1xmmc_cmd_complete(host, status);
		}
		else if (!(host->flags & HOST_F_DMA)) {
			if ((host->flags & HOST_F_XMIT) &&
			    (status & STATUS_DATA_OUT))
				au1xmmc_send_pio(host);
			else if ((host->flags & HOST_F_RECV) &&
			    (status & STATUS_DATA_IN))
				au1xmmc_receive_pio(host);
		}
		else if (status & 0x203FBC70) {
			DBG("Unhandled status %8.8x\n", host->id, status);
			handled = 0;
		}

		au_writel(status, HOST_STATUS(host));
		au_sync();

		ret |= handled;
	}

	enable_irq(AU1100_SD_IRQ);
	return ret;
}

static void au1xmmc_poll_event(unsigned long arg)
{
	struct au1xmmc_host *host = (struct au1xmmc_host *) arg;

	int card = au1xmmc_card_inserted(host);
	int controller = (host->flags & HOST_F_ACTIVE) ? 1 : 0;

	if (card != controller) {
		host->flags &= ~HOST_F_ACTIVE;
		if (card) host->flags |= HOST_F_ACTIVE;
		mmc_detect_change(host->mmc, 0);
	}

	if (host->mrq != NULL) {
		u32 status = au_readl(HOST_STATUS(host));
		DBG("PENDING - %8.8x\n", host->id, status);
	}

	mod_timer(&host->timer, jiffies + AU1XMMC_DETECT_TIMEOUT);
}

static dbdev_tab_t au1xmmc_mem_dbdev =
{
	DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 8, 0x00000000, 0, 0
};

static void au1xmmc_init_dma(struct au1xmmc_host *host)
{

	u32 rxchan, txchan;

	int txid = au1xmmc_card_table[host->id].tx_devid;
	int rxid = au1xmmc_card_table[host->id].rx_devid;

	/* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride
	   of 8 bits.  And since devices are shared, we need to create
	   our own to avoid freaking out other devices
	*/

	int memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);

	txchan = au1xxx_dbdma_chan_alloc(memid, txid,
					 au1xmmc_dma_callback, (void *) host);

	rxchan = au1xxx_dbdma_chan_alloc(rxid, memid,
					 au1xmmc_dma_callback, (void *) host);

	au1xxx_dbdma_set_devwidth(txchan, 8);
	au1xxx_dbdma_set_devwidth(rxchan, 8);

	au1xxx_dbdma_ring_alloc(txchan, AU1XMMC_DESCRIPTOR_COUNT);
	au1xxx_dbdma_ring_alloc(rxchan, AU1XMMC_DESCRIPTOR_COUNT);

	host->tx_chan = txchan;
	host->rx_chan = rxchan;
}

static const struct mmc_host_ops au1xmmc_ops = {
	.request	= au1xmmc_request,
	.set_ios	= au1xmmc_set_ios,
	.get_ro		= au1xmmc_card_readonly,
};

static int __devinit au1xmmc_probe(struct platform_device *pdev)
{

	int i, ret = 0;

	/* The interrupt is shared among all controllers */
	ret = request_irq(AU1100_SD_IRQ, au1xmmc_irq, IRQF_DISABLED, "MMC", 0);

	if (ret) {
		printk(DRIVER_NAME "ERROR: Couldn't get int %d: %d\n",
				AU1100_SD_IRQ, ret);
		return -ENXIO;
	}

	disable_irq(AU1100_SD_IRQ);

	for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) {
		struct mmc_host *mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
		struct au1xmmc_host *host = 0;

		if (!mmc) {
			printk(DRIVER_NAME "ERROR: no mem for host %d\n", i);
			au1xmmc_hosts[i] = 0;
			continue;
		}

		mmc->ops = &au1xmmc_ops;

		mmc->f_min =   450000;
		mmc->f_max = 24000000;

		mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
		mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;

		mmc->max_blk_size = 2048;
		mmc->max_blk_count = 512;

		mmc->ocr_avail = AU1XMMC_OCR;

		host = mmc_priv(mmc);
		host->mmc = mmc;

		host->id = i;
		host->iobase = au1xmmc_card_table[host->id].iobase;
		host->clock = 0;
		host->power_mode = MMC_POWER_OFF;

		host->flags = au1xmmc_card_inserted(host) ? HOST_F_ACTIVE : 0;
		host->status = HOST_S_IDLE;

		init_timer(&host->timer);

		host->timer.function = au1xmmc_poll_event;
		host->timer.data = (unsigned long) host;
		host->timer.expires = jiffies + AU1XMMC_DETECT_TIMEOUT;

		tasklet_init(&host->data_task, au1xmmc_tasklet_data,
				(unsigned long) host);

		tasklet_init(&host->finish_task, au1xmmc_tasklet_finish,
				(unsigned long) host);

		spin_lock_init(&host->lock);

		if (dma != 0)
			au1xmmc_init_dma(host);

		au1xmmc_reset_controller(host);

		mmc_add_host(mmc);
		au1xmmc_hosts[i] = host;

		add_timer(&host->timer);

		printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X (mode=%s)\n",
		       host->id, host->iobase, dma ? "dma" : "pio");
	}

	enable_irq(AU1100_SD_IRQ);

	return 0;
}

static int __devexit au1xmmc_remove(struct platform_device *pdev)
{

	int i;

	disable_irq(AU1100_SD_IRQ);

	for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) {
		struct au1xmmc_host *host = au1xmmc_hosts[i];
		if (!host) continue;

		tasklet_kill(&host->data_task);
		tasklet_kill(&host->finish_task);

		del_timer_sync(&host->timer);
		au1xmmc_set_power(host, 0);

		mmc_remove_host(host->mmc);

		au1xxx_dbdma_chan_free(host->tx_chan);
		au1xxx_dbdma_chan_free(host->rx_chan);

		au_writel(0x0, HOST_ENABLE(host));
		au_sync();
	}

	free_irq(AU1100_SD_IRQ, 0);
	return 0;
}

static struct platform_driver au1xmmc_driver = {
	.probe         = au1xmmc_probe,
	.remove        = au1xmmc_remove,
	.suspend       = NULL,
	.resume        = NULL,
	.driver        = {
		.name  = DRIVER_NAME,
	},
};

static int __init au1xmmc_init(void)
{
	return platform_driver_register(&au1xmmc_driver);
}

static void __exit au1xmmc_exit(void)
{
	platform_driver_unregister(&au1xmmc_driver);
}

module_init(au1xmmc_init);
module_exit(au1xmmc_exit);

#ifdef MODULE
MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX");
MODULE_LICENSE("GPL");
#endif
