// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/mmc/host/au1xmmc.c - AU1XX0 MMC driver
 *
 *  Copyright (c) 2005, Advanced Micro Devices, Inc.
 *
 *  Developed with help from the 2.4.30 MMC AU1XXX controller including
 *  the following copyright notices:
 *     Copyright (c) 2003-2004 Embedded Edge, LLC.
 *     Portions Copyright (C) 2002 Embedix, Inc
 *     Copyright 2002 Hewlett-Packard Company
 *
 *  2.6 version of this driver inspired by:
 *     (drivers/mmc/wbsd.c) Copyright (C) 2004-2005 Pierre Ossman,
 *     All Rights Reserved.
 *     (drivers/mmc/pxa.c) Copyright (C) 2003 Russell King,
 *     All Rights Reserved.
 *
 */

/* Why don't we use the SD controllers' carddetect feature?
 *
 * From the AU1100 MMC application guide:
 * If the Au1100-based design is intended to support both MultiMediaCards
 * and 1- or 4-data bit SecureDigital cards, then the solution is to
 * connect a weak (560 kOhm) pull-up resistor to connector pin 1.
 * In doing so, an MMC card never enters SPI-mode communications, but now
 * the SecureDigital card-detect feature of CD/DAT3 is ineffective (the
 * low-to-high transition will not occur).
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/leds.h>
#include <linux/mmc/host.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1100_mmc.h>

#define DRIVER_NAME "au1xxx-mmc"

/* Set this to enable special debugging macros */
/* #define DEBUG */

#ifdef DEBUG
#define DBG(fmt, idx, args...)	\
	pr_debug("au1xmmc(%d): DEBUG: " fmt, idx, ##args)
#else
#define DBG(fmt, idx, args...) do {} while (0)
#endif

/* Hardware definitions */
#define AU1XMMC_DESCRIPTOR_COUNT 1

/* max DMA seg size: 64KB on Au1100, 4MB on Au1200 */
#define AU1100_MMC_DESCRIPTOR_SIZE 0x0000ffff
#define AU1200_MMC_DESCRIPTOR_SIZE 0x003fffff

#define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
		     MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
		     MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36)

/* This gives us a hard value for the stop command that we can write directly
 * to the command register: CMD12 (STOP_TRANSMISSION, index 0xC) with an R1b
 * response, kicked off immediately via SD_CMD_GO.
 */
#define STOP_CMD	\
	(SD_CMD_RT_1B | SD_CMD_CT_7 | (0xC << SD_CMD_CI_SHIFT) | SD_CMD_GO)

/* This is the set of interrupts that we configure by default. */
#define AU1XMMC_INTERRUPTS				\
	(SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_RAT |	\
	 SD_CONFIG_CR | SD_CONFIG_I)

/* The poll event (looking for insert/remove events) runs twice a second. */
#define AU1XMMC_DETECT_TIMEOUT (HZ/2)

struct au1xmmc_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;

	u32 flags;
	void __iomem *iobase;
	u32 clock;
	u32 bus_width;
	u32 power_mode;

	int status;

	struct {
		int len;
		int dir;
	} dma;

	struct {
		int index;
		int offset;
		int len;
	} pio;

	u32 tx_chan;
	u32 rx_chan;

	int irq;

	struct tasklet_struct finish_task;
	struct tasklet_struct data_task;
	struct au1xmmc_platform_data *platdata;
	struct platform_device *pdev;
	struct resource *ioarea;
	struct clk *clk;
};

/* Status flags used by the host structure */
#define HOST_F_XMIT	0x0001
#define HOST_F_RECV	0x0002
#define HOST_F_DMA	0x0010
#define HOST_F_DBDMA	0x0020
#define HOST_F_ACTIVE	0x0100
#define HOST_F_STOP	0x1000

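/* Host state machine values, kept in host->status */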
#define HOST_S_IDLE	0x0001
#define HOST_S_CMD	0x0002
#define HOST_S_DATA	0x0003
#define HOST_S_STOP	0x0004

/* Easy access macros */
#define HOST_STATUS(h)	((h)->iobase + SD_STATUS)
#define HOST_CONFIG(h)	((h)->iobase + SD_CONFIG)
#define HOST_ENABLE(h)	((h)->iobase + SD_ENABLE)
#define HOST_TXPORT(h)	((h)->iobase + SD_TXPORT)
#define HOST_RXPORT(h)	((h)->iobase + SD_RXPORT)
#define HOST_CMDARG(h)	((h)->iobase + SD_CMDARG)
#define HOST_BLKSIZE(h)	((h)->iobase + SD_BLKSIZE)
#define HOST_CMD(h)	((h)->iobase + SD_CMD)
#define HOST_CONFIG2(h)	((h)->iobase + SD_CONFIG2)
#define HOST_TIMEOUT(h)	((h)->iobase + SD_TIMEOUT)
#define HOST_DEBUG(h)	((h)->iobase + SD_DEBUG)

#define DMA_CHANNEL(h)	\
	(((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan)

static inline int has_dbdma(void)
{
	switch (alchemy_get_cputype()) {
	case ALCHEMY_CPU_AU1200:
	case ALCHEMY_CPU_AU1300:
		return 1;
	default:
		return 0;
	}
}

static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
{
	u32 val = __raw_readl(HOST_CONFIG(host));
	val |= mask;
	__raw_writel(val, HOST_CONFIG(host));
	wmb(); /* drain writebuffer */
}

static inline void FLUSH_FIFO(struct au1xmmc_host *host)
{
	u32 val = __raw_readl(HOST_CONFIG2(host));

	__raw_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host));
	wmb(); /* drain writebuffer */
	mdelay(1);

	/* SEND_STOP will turn off clock control - this re-enables it */
	val &= ~SD_CONFIG2_DF;

	__raw_writel(val, HOST_CONFIG2(host));
	wmb(); /* drain writebuffer */
}

static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
{
	u32 val = __raw_readl(HOST_CONFIG(host));
	val &= ~mask;
	__raw_writel(val, HOST_CONFIG(host));
	wmb(); /* drain writebuffer */
}

static inline void SEND_STOP(struct au1xmmc_host *host)
{
	u32 config2;

	WARN_ON(host->status != HOST_S_DATA);
	host->status = HOST_S_STOP;

	config2 = __raw_readl(HOST_CONFIG2(host));
	__raw_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
	wmb(); /* drain writebuffer */

	/* Send the stop command */
	__raw_writel(STOP_CMD, HOST_CMD(host));
	wmb(); /* drain writebuffer */
}

static void au1xmmc_set_power(struct au1xmmc_host *host, int state)
{
	if (host->platdata && host->platdata->set_power)
		host->platdata->set_power(host->mmc, state);
}

static int au1xmmc_card_inserted(struct mmc_host *mmc)
{
	struct au1xmmc_host *host = mmc_priv(mmc);

	if (host->platdata && host->platdata->card_inserted)
		return !!host->platdata->card_inserted(host->mmc);

	return -ENOSYS;
}

static int au1xmmc_card_readonly(struct mmc_host *mmc)
{
	struct au1xmmc_host *host = mmc_priv(mmc);

	if (host->platdata && host->platdata->card_readonly)
		return !!host->platdata->card_readonly(mmc);

	return -ENOSYS;
}

static void au1xmmc_finish_request(struct au1xmmc_host *host)
{
	struct mmc_request *mrq = host->mrq;

	host->mrq = NULL;
	host->flags &= HOST_F_ACTIVE | HOST_F_DMA;

	host->dma.len = 0;
	host->dma.dir = 0;

	host->pio.index  = 0;
	host->pio.offset = 0;
	host->pio.len = 0;

	host->status = HOST_S_IDLE;

	mmc_request_done(host->mmc, mrq);
}

static void au1xmmc_tasklet_finish(struct tasklet_struct *t)
{
	struct au1xmmc_host *host = from_tasklet(host, t, finish_task);
	au1xmmc_finish_request(host);
}

static int au1xmmc_send_command(struct au1xmmc_host *host,
				struct mmc_command *cmd, struct mmc_data *data)
{
	u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		break;
	case MMC_RSP_R1:
		mmccmd |= SD_CMD_RT_1;
		break;
	case MMC_RSP_R1B:
		mmccmd |= SD_CMD_RT_1B;
		break;
	case MMC_RSP_R2:
		mmccmd |= SD_CMD_RT_2;
		break;
	case MMC_RSP_R3:
		mmccmd |= SD_CMD_RT_3;
		break;
	default:
		pr_info("au1xmmc: unhandled response type %02x\n",
			mmc_resp_type(cmd));
		return -EINVAL;
	}

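	/* Select the data transfer type: single- vs. multi-block and
	 * read vs. write.
	 */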
	if (data) {
		if (data->flags & MMC_DATA_READ) {
			if (data->blocks > 1)
				mmccmd |= SD_CMD_CT_4;
			else
				mmccmd |= SD_CMD_CT_2;
		} else if (data->flags & MMC_DATA_WRITE) {
			if (data->blocks > 1)
				mmccmd |= SD_CMD_CT_3;
			else
				mmccmd |= SD_CMD_CT_1;
		}
	}

	__raw_writel(cmd->arg, HOST_CMDARG(host));
	wmb(); /* drain writebuffer */

	__raw_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
	wmb(); /* drain writebuffer */

	/* Wait for the command to go on the line */
	while (__raw_readl(HOST_CMD(host)) & SD_CMD_GO)
		/* nop */;

	return 0;
}

static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data;
	u32 crc;

	WARN_ON((host->status != HOST_S_DATA) && (host->status != HOST_S_STOP));

	if (host->mrq == NULL)
		return;

	data = mrq->cmd->data;

	if (status == 0)
		status = __raw_readl(HOST_STATUS(host));

	/* The transaction is really over when the SD_STATUS_DB bit is clear */
	while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
		status = __raw_readl(HOST_STATUS(host));

	data->error = 0;
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);

	/* Process any errors */
	crc = (status & (SD_STATUS_WC | SD_STATUS_RC));

	/* For writes, the low three status bits appear to carry the card's
	 * CRC response token; 0x2 ("010") means the data was accepted, and
	 * anything else is treated as a CRC error.
	 */
	if (host->flags & HOST_F_XMIT)
		crc |= ((status & 0x07) == 0x02) ? 0 : 1;

	if (crc)
		data->error = -EILSEQ;

	/* Clear the CRC bits */
	__raw_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host));

	data->bytes_xfered = 0;

	if (!data->error) {
		if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
			u32 chan = DMA_CHANNEL(host);

			chan_tab_t *c = *((chan_tab_t **)chan);
			au1x_dma_chan_t *cp = c->chan_ptr;
			data->bytes_xfered = cp->ddma_bytecnt;
		} else {
			data->bytes_xfered =
				(data->blocks * data->blksz) - host->pio.len;
		}
	}

	au1xmmc_finish_request(host);
}

static void au1xmmc_tasklet_data(struct tasklet_struct *t)
{
	struct au1xmmc_host *host = from_tasklet(host, t, data_task);

	u32 status = __raw_readl(HOST_STATUS(host));
	au1xmmc_data_complete(host, status);
}

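/* Upper bound on the number of bytes moved per call to the PIO
 * send/receive handlers.
 */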
#define AU1XMMC_MAX_TRANSFER 8

static void au1xmmc_send_pio(struct au1xmmc_host *host)
{
	struct mmc_data *data;
	int sg_len, max, count;
	unsigned char *sg_ptr, val;
	u32 status;
	struct scatterlist *sg;

	data = host->mrq->data;

	if (!(host->flags & HOST_F_XMIT))
		return;

	/* This is the pointer to the data buffer */
	sg = &data->sg[host->pio.index];
	sg_ptr = kmap_local_page(sg_page(sg)) + sg->offset + host->pio.offset;

	/* This is the space left inside the buffer */
	sg_len = data->sg[host->pio.index].length - host->pio.offset;

	/* Check if we need less than the size of the sg_buffer */
	max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
	if (max > AU1XMMC_MAX_TRANSFER)
		max = AU1XMMC_MAX_TRANSFER;

	for (count = 0; count < max; count++) {
		status = __raw_readl(HOST_STATUS(host));

		if (!(status & SD_STATUS_TH))
			break;

		val = sg_ptr[count];

		__raw_writel((unsigned long)val, HOST_TXPORT(host));
		wmb(); /* drain writebuffer */
	}
	kunmap_local(sg_ptr);

	host->pio.len -= count;
	host->pio.offset += count;

	if (count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}

	if (host->pio.len == 0) {
		IRQ_OFF(host, SD_CONFIG_TH);

		if (host->flags & HOST_F_STOP)
			SEND_STOP(host);

		tasklet_schedule(&host->data_task);
	}
}

static void au1xmmc_receive_pio(struct au1xmmc_host *host)
{
	struct mmc_data *data;
	int max, count, sg_len = 0;
	unsigned char *sg_ptr = NULL;
	u32 status, val;
	struct scatterlist *sg;

	data = host->mrq->data;

	if (!(host->flags & HOST_F_RECV))
		return;

	max = host->pio.len;

	if (host->pio.index < host->dma.len) {
		sg = &data->sg[host->pio.index];
		sg_ptr = kmap_local_page(sg_page(sg)) + sg->offset + host->pio.offset;

		/* This is the space left inside the buffer */
		sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;

		/* Check if we need less than the size of the sg_buffer */
		if (sg_len < max)
			max = sg_len;
	}

	if (max > AU1XMMC_MAX_TRANSFER)
		max = AU1XMMC_MAX_TRANSFER;

	for (count = 0; count < max; count++) {
		status = __raw_readl(HOST_STATUS(host));

		if (!(status & SD_STATUS_NE))
			break;

		if (status & SD_STATUS_RC) {
			DBG("RX CRC Error [%d + %d].\n", host->pdev->id,
					host->pio.len, count);
			break;
		}

		if (status & SD_STATUS_RO) {
			DBG("RX Overrun [%d + %d]\n", host->pdev->id,
					host->pio.len, count);
			break;
		} else if (status & SD_STATUS_RU) {
			DBG("RX Underrun [%d + %d]\n", host->pdev->id,
					host->pio.len, count);
			break;
		}

		val = __raw_readl(HOST_RXPORT(host));

		if (sg_ptr)
			sg_ptr[count] = (unsigned char)(val & 0xFF);
	}
	if (sg_ptr)
		kunmap_local(sg_ptr);

	host->pio.len -= count;
	host->pio.offset += count;

	if (sg_len && count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}

	if (host->pio.len == 0) {
		/* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */
		IRQ_OFF(host, SD_CONFIG_NE);

		if (host->flags & HOST_F_STOP)
			SEND_STOP(host);

		tasklet_schedule(&host->data_task);
	}
}

/* This is called when a command has been completed - grab the response
 * and check for errors.  Then start the data transfer if it is indicated.
 */
static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd;
	u32 r[4];
	int i, trans;

	if (!host->mrq)
		return;

	cmd = mrq->cmd;
	cmd->error = 0;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			r[0] = __raw_readl(host->iobase + SD_RESP3);
			r[1] = __raw_readl(host->iobase + SD_RESP2);
			r[2] = __raw_readl(host->iobase + SD_RESP1);
			r[3] = __raw_readl(host->iobase + SD_RESP0);

			/* The CRC is omitted from the response, so really
			 * we only got 120 bits, but the engine expects
			 * 128 bits, so we have to shift things up.
			 */
			for (i = 0; i < 4; i++) {
				cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
				if (i != 3)
					cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
			}
		} else {
			/* Technically, we should be getting all 48 bits of
			 * the response (SD_RESP1 + SD_RESP2), but because
			 * our response omits the CRC, our data ends up
			 * being shifted 8 bits to the right.  In this case,
			 * that means that the OSR data starts at bit 31,
			 * so we can just read RESP0 and return that.
			 */
			cmd->resp[0] = __raw_readl(host->iobase + SD_RESP0);
		}
	}

	/* Figure out errors */
	if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC))
		cmd->error = -EILSEQ;

	trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);

	if (!trans || cmd->error) {
		IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
		tasklet_schedule(&host->finish_task);
		return;
	}

	host->status = HOST_S_DATA;

	if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
		u32 channel = DMA_CHANNEL(host);

		/* Start the DBDMA as soon as the buffer gets something in it */

		if (host->flags & HOST_F_RECV) {
			u32 mask = SD_STATUS_DB | SD_STATUS_NE;

			while ((status & mask) != mask)
				status = __raw_readl(HOST_STATUS(host));
		}

		au1xxx_dbdma_start(channel);
	}
}

static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
{
	unsigned int pbus = clk_get_rate(host->clk);
	unsigned int divisor = ((pbus / rate) / 2) - 1;
	u32 config;

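	/* The resulting card clock is pbus / (2 * (divisor + 1)); with
	 * purely illustrative numbers, a 48 MHz peripheral clock and a
	 * 12 MHz target give divisor = ((48 / 12) / 2) - 1 = 1.
	 */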
	config = __raw_readl(HOST_CONFIG(host));

	config &= ~(SD_CONFIG_DIV);
	config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE;

	__raw_writel(config, HOST_CONFIG(host));
	wmb(); /* drain writebuffer */
}

static int au1xmmc_prepare_data(struct au1xmmc_host *host,
				struct mmc_data *data)
{
	int datalen = data->blocks * data->blksz;

	if (data->flags & MMC_DATA_READ)
		host->flags |= HOST_F_RECV;
	else
		host->flags |= HOST_F_XMIT;

	if (host->mrq->stop)
		host->flags |= HOST_F_STOP;

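	/* The mapping is bidirectional, presumably so the same map/unmap
	 * path covers both read and write transfers.
	 */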
	host->dma.dir = DMA_BIDIRECTIONAL;

	host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg,
				   data->sg_len, host->dma.dir);

	if (host->dma.len == 0)
		return -ETIMEDOUT;

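	/* The block size register appears to take the block length minus
	 * one.
	 */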
	__raw_writel(data->blksz - 1, HOST_BLKSIZE(host));

	if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
		int i;
		u32 channel = DMA_CHANNEL(host);

		au1xxx_dbdma_stop(channel);

		for (i = 0; i < host->dma.len; i++) {
			u32 ret = 0, flags = DDMA_FLAGS_NOIE;
			struct scatterlist *sg = &data->sg[i];
			int sg_len = sg->length;

			int len = (datalen > sg_len) ? sg_len : datalen;

			if (i == host->dma.len - 1)
				flags = DDMA_FLAGS_IE;

			if (host->flags & HOST_F_XMIT) {
				ret = au1xxx_dbdma_put_source(channel,
					sg_phys(sg), len, flags);
			} else {
				ret = au1xxx_dbdma_put_dest(channel,
					sg_phys(sg), len, flags);
			}

			if (!ret)
				goto dataerr;

			datalen -= len;
		}
	} else {
		host->pio.index = 0;
		host->pio.offset = 0;
		host->pio.len = datalen;

		if (host->flags & HOST_F_XMIT)
			IRQ_ON(host, SD_CONFIG_TH);
		else
			IRQ_ON(host, SD_CONFIG_NE);
			/* IRQ_ON(host, SD_CONFIG_RA | SD_CONFIG_RF); */
	}

	return 0;

dataerr:
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			host->dma.dir);
	return -ETIMEDOUT;
}

/* This actually starts a command or data transaction */
static void au1xmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct au1xmmc_host *host = mmc_priv(mmc);
	int ret = 0;

	WARN_ON(irqs_disabled());
	WARN_ON(host->status != HOST_S_IDLE);

	host->mrq = mrq;
	host->status = HOST_S_CMD;

	/* fail request immediately if no card is present */
	if (0 == au1xmmc_card_inserted(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		au1xmmc_finish_request(host);
		return;
	}

	if (mrq->data) {
		FLUSH_FIFO(host);
		ret = au1xmmc_prepare_data(host, mrq->data);
	}

	if (!ret)
		ret = au1xmmc_send_command(host, mrq->cmd, mrq->data);

	if (ret) {
		mrq->cmd->error = ret;
		au1xmmc_finish_request(host);
	}
}

static void au1xmmc_reset_controller(struct au1xmmc_host *host)
{
	/* Apply the clock */
	__raw_writel(SD_ENABLE_CE, HOST_ENABLE(host));
	wmb(); /* drain writebuffer */
	mdelay(1);

	__raw_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host));
	wmb(); /* drain writebuffer */
	mdelay(5);

	__raw_writel(~0, HOST_STATUS(host));
	wmb(); /* drain writebuffer */

	__raw_writel(0, HOST_BLKSIZE(host));
	__raw_writel(0x001fffff, HOST_TIMEOUT(host));
	wmb(); /* drain writebuffer */

	__raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
	wmb(); /* drain writebuffer */

	__raw_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host));
	wmb(); /* drain writebuffer */
	mdelay(1);

	__raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
	wmb(); /* drain writebuffer */

	/* Configure interrupts */
	__raw_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host));
	wmb(); /* drain writebuffer */
}

static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct au1xmmc_host *host = mmc_priv(mmc);
	u32 config2;

	if (ios->power_mode == MMC_POWER_OFF)
		au1xmmc_set_power(host, 0);
	else if (ios->power_mode == MMC_POWER_ON)
		au1xmmc_set_power(host, 1);

	if (ios->clock && ios->clock != host->clock) {
		au1xmmc_set_clock(host, ios->clock);
		host->clock = ios->clock;
	}

	config2 = __raw_readl(HOST_CONFIG2(host));
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		config2 |= SD_CONFIG2_BB;
		break;
	case MMC_BUS_WIDTH_4:
		config2 &= ~SD_CONFIG2_BB;
		config2 |= SD_CONFIG2_WB;
		break;
	case MMC_BUS_WIDTH_1:
		config2 &= ~(SD_CONFIG2_WB | SD_CONFIG2_BB);
		break;
	}
	__raw_writel(config2, HOST_CONFIG2(host));
	wmb(); /* drain writebuffer */
}

#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT)
#define STATUS_DATA_IN  (SD_STATUS_NE)
#define STATUS_DATA_OUT (SD_STATUS_TH)

static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
{
	struct au1xmmc_host *host = dev_id;
	u32 status;

	status = __raw_readl(HOST_STATUS(host));

	if (!(status & SD_STATUS_I))
		return IRQ_NONE;	/* not ours */

	if (status & SD_STATUS_SI)	/* SDIO */
		mmc_signal_sdio_irq(host->mmc);

	if (host->mrq && (status & STATUS_TIMEOUT)) {
		if (status & SD_STATUS_RAT)
			host->mrq->cmd->error = -ETIMEDOUT;
		else if (status & SD_STATUS_DT)
			host->mrq->data->error = -ETIMEDOUT;

		/* In PIO mode, interrupts might still be enabled */
		IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);

		/* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */
		tasklet_schedule(&host->finish_task);
	}
#if 0
	else if (status & SD_STATUS_DD) {
		/* Sometimes we get a DD before a NE in PIO mode */
		if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE))
			au1xmmc_receive_pio(host);
		else {
			au1xmmc_data_complete(host, status);
			/* tasklet_schedule(&host->data_task); */
		}
	}
#endif
	else if (status & SD_STATUS_CR) {
		if (host->status == HOST_S_CMD)
			au1xmmc_cmd_complete(host, status);

	} else if (!(host->flags & HOST_F_DMA)) {
		if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT))
			au1xmmc_send_pio(host);
		else if ((host->flags & HOST_F_RECV) && (status & STATUS_DATA_IN))
			au1xmmc_receive_pio(host);

	} else if (status & 0x203F3C70) {
		DBG("Unhandled status %8.8x\n", host->pdev->id, status);
	}

	__raw_writel(status, HOST_STATUS(host));
	wmb(); /* drain writebuffer */

	return IRQ_HANDLED;
}

/* 8bit memory DMA device */
static dbdev_tab_t au1xmmc_mem_dbdev = {
	.dev_id		= DSCR_CMD0_ALWAYS,
	.dev_flags	= DEV_FLAGS_ANYUSE,
	.dev_tsize	= 0,
	.dev_devwidth	= 8,
	.dev_physaddr	= 0x00000000,
	.dev_intlevel	= 0,
	.dev_intpolarity = 0,
};
static int memid;

static void au1xmmc_dbdma_callback(int irq, void *dev_id)
{
	struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id;

	/* Avoid spurious interrupts */
	if (!host->mrq)
		return;

	if (host->flags & HOST_F_STOP)
		SEND_STOP(host);

	tasklet_schedule(&host->data_task);
}

static int au1xmmc_dbdma_init(struct au1xmmc_host *host)
{
	struct resource *res;
	int txid, rxid;

	res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0);
	if (!res)
		return -ENODEV;
	txid = res->start;

	res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1);
	if (!res)
		return -ENODEV;
	rxid = res->start;

	if (!memid)
		return -ENODEV;

	host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid,
				au1xmmc_dbdma_callback, (void *)host);
	if (!host->tx_chan) {
		dev_err(&host->pdev->dev, "cannot allocate TX DMA\n");
		return -ENODEV;
	}

	host->rx_chan = au1xxx_dbdma_chan_alloc(rxid, memid,
				au1xmmc_dbdma_callback, (void *)host);
	if (!host->rx_chan) {
		dev_err(&host->pdev->dev, "cannot allocate RX DMA\n");
		au1xxx_dbdma_chan_free(host->tx_chan);
		return -ENODEV;
	}

	au1xxx_dbdma_set_devwidth(host->tx_chan, 8);
	au1xxx_dbdma_set_devwidth(host->rx_chan, 8);

	au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT);
	au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT);

	/* DBDMA is good to go */
	host->flags |= HOST_F_DMA | HOST_F_DBDMA;

	return 0;
}

static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host)
{
	if (host->flags & HOST_F_DMA) {
		host->flags &= ~HOST_F_DMA;
		au1xxx_dbdma_chan_free(host->tx_chan);
		au1xxx_dbdma_chan_free(host->rx_chan);
	}
}

static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en)
{
	struct au1xmmc_host *host = mmc_priv(mmc);

	if (en)
		IRQ_ON(host, SD_CONFIG_SI);
	else
		IRQ_OFF(host, SD_CONFIG_SI);
}

static const struct mmc_host_ops au1xmmc_ops = {
	.request	= au1xmmc_request,
	.set_ios	= au1xmmc_set_ios,
	.get_ro		= au1xmmc_card_readonly,
	.get_cd		= au1xmmc_card_inserted,
	.enable_sdio_irq = au1xmmc_enable_sdio_irq,
};

static int au1xmmc_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct au1xmmc_host *host;
	struct resource *r;
	int ret, iflag;

	mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "no memory for mmc_host\n");
		ret = -ENOMEM;
		goto out0;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->platdata = pdev->dev.platform_data;
	host->pdev = pdev;

	ret = -ENODEV;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no mmio defined\n");
		goto out1;
	}

	host->ioarea = request_mem_region(r->start, resource_size(r),
					   pdev->name);
	if (!host->ioarea) {
		dev_err(&pdev->dev, "mmio already in use\n");
		goto out1;
	}

	host->iobase = ioremap(r->start, 0x3c);
	if (!host->iobase) {
		dev_err(&pdev->dev, "cannot remap mmio\n");
		goto out2;
	}

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		ret = host->irq;
		goto out3;
	}

	mmc->ops = &au1xmmc_ops;

	mmc->f_min =   450000;
	mmc->f_max = 24000000;

	mmc->max_blk_size = 2048;
	mmc->max_blk_count = 512;

	mmc->ocr_avail = AU1XMMC_OCR;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
	mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;

	iflag = IRQF_SHARED;	/* Au1100/Au1200: one int for both ctrls */

	switch (alchemy_get_cputype()) {
	case ALCHEMY_CPU_AU1100:
		mmc->max_seg_size = AU1100_MMC_DESCRIPTOR_SIZE;
		break;
	case ALCHEMY_CPU_AU1200:
		mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
		break;
	case ALCHEMY_CPU_AU1300:
		iflag = 0;	/* nothing is shared */
		mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
		mmc->f_max = 52000000;
		if (host->ioarea->start == AU1100_SD0_PHYS_ADDR)
			mmc->caps |= MMC_CAP_8_BIT_DATA;
		break;
	}

	ret = request_irq(host->irq, au1xmmc_irq, iflag, DRIVER_NAME, host);
	if (ret) {
		dev_err(&pdev->dev, "cannot grab IRQ\n");
		goto out3;
	}

	host->clk = clk_get(&pdev->dev, ALCHEMY_PERIPH_CLK);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "cannot find clock\n");
		ret = PTR_ERR(host->clk);
		goto out_irq;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret) {
		dev_err(&pdev->dev, "cannot enable clock\n");
		goto out_clk;
	}

	host->status = HOST_S_IDLE;

	/* board-specific carddetect setup, if any */
	if (host->platdata && host->platdata->cd_setup) {
		ret = host->platdata->cd_setup(mmc, 1);
		if (ret) {
			dev_warn(&pdev->dev, "board CD setup failed\n");
			mmc->caps |= MMC_CAP_NEEDS_POLL;
		}
	} else {
		mmc->caps |= MMC_CAP_NEEDS_POLL;
	}

	/* platform may not be able to use all advertised caps */
	if (host->platdata)
		mmc->caps &= ~(host->platdata->mask_host_caps);

	tasklet_setup(&host->data_task, au1xmmc_tasklet_data);
	tasklet_setup(&host->finish_task, au1xmmc_tasklet_finish);

	if (has_dbdma()) {
		ret = au1xmmc_dbdma_init(host);
		if (ret)
			pr_info(DRIVER_NAME ": DBDMA init failed; using PIO\n");
	}

#ifdef CONFIG_LEDS_CLASS
	if (host->platdata && host->platdata->led) {
		struct led_classdev *led = host->platdata->led;
		led->name = mmc_hostname(mmc);
		led->brightness = LED_OFF;
		led->default_trigger = mmc_hostname(mmc);
		ret = led_classdev_register(mmc_dev(mmc), led);
		if (ret)
			goto out5;
	}
#endif

	au1xmmc_reset_controller(host);

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(&pdev->dev, "cannot add mmc host\n");
		goto out6;
	}

	platform_set_drvdata(pdev, host);

	pr_info(DRIVER_NAME ": MMC Controller %d set up at %p (mode=%s)\n",
		pdev->id, host->iobase,
		host->flags & HOST_F_DMA ? "dma" : "pio");

	return 0;	/* all ok */

out6:
#ifdef CONFIG_LEDS_CLASS
	if (host->platdata && host->platdata->led)
		led_classdev_unregister(host->platdata->led);
out5:
#endif
	__raw_writel(0, HOST_ENABLE(host));
	__raw_writel(0, HOST_CONFIG(host));
	__raw_writel(0, HOST_CONFIG2(host));
	wmb(); /* drain writebuffer */

	if (host->flags & HOST_F_DBDMA)
		au1xmmc_dbdma_shutdown(host);

	tasklet_kill(&host->data_task);
	tasklet_kill(&host->finish_task);

	if (host->platdata && host->platdata->cd_setup &&
	    !(mmc->caps & MMC_CAP_NEEDS_POLL))
		host->platdata->cd_setup(mmc, 0);

	clk_disable_unprepare(host->clk);
out_clk:
	clk_put(host->clk);
out_irq:
	free_irq(host->irq, host);
out3:
	iounmap((void *)host->iobase);
out2:
	release_resource(host->ioarea);
	kfree(host->ioarea);
out1:
	mmc_free_host(mmc);
out0:
	return ret;
}

static void au1xmmc_remove(struct platform_device *pdev)
{
	struct au1xmmc_host *host = platform_get_drvdata(pdev);

	if (host) {
		mmc_remove_host(host->mmc);

#ifdef CONFIG_LEDS_CLASS
		if (host->platdata && host->platdata->led)
			led_classdev_unregister(host->platdata->led);
#endif

		if (host->platdata && host->platdata->cd_setup &&
		    !(host->mmc->caps & MMC_CAP_NEEDS_POLL))
			host->platdata->cd_setup(host->mmc, 0);

		__raw_writel(0, HOST_ENABLE(host));
		__raw_writel(0, HOST_CONFIG(host));
		__raw_writel(0, HOST_CONFIG2(host));
		wmb(); /* drain writebuffer */

		tasklet_kill(&host->data_task);
		tasklet_kill(&host->finish_task);

		if (host->flags & HOST_F_DBDMA)
			au1xmmc_dbdma_shutdown(host);

		au1xmmc_set_power(host, 0);

		clk_disable_unprepare(host->clk);
		clk_put(host->clk);

		free_irq(host->irq, host);
		iounmap((void *)host->iobase);
		release_resource(host->ioarea);
		kfree(host->ioarea);

		mmc_free_host(host->mmc);
	}
}

#ifdef CONFIG_PM
static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct au1xmmc_host *host = platform_get_drvdata(pdev);

	__raw_writel(0, HOST_CONFIG2(host));
	__raw_writel(0, HOST_CONFIG(host));
	__raw_writel(0xffffffff, HOST_STATUS(host));
	__raw_writel(0, HOST_ENABLE(host));
	wmb(); /* drain writebuffer */

	return 0;
}

static int au1xmmc_resume(struct platform_device *pdev)
{
	struct au1xmmc_host *host = platform_get_drvdata(pdev);

	au1xmmc_reset_controller(host);

	return 0;
}
#else
#define au1xmmc_suspend NULL
#define au1xmmc_resume NULL
#endif

static struct platform_driver au1xmmc_driver = {
	.probe         = au1xmmc_probe,
	.remove_new    = au1xmmc_remove,
	.suspend       = au1xmmc_suspend,
	.resume        = au1xmmc_resume,
	.driver        = {
		.name  = DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

static int __init au1xmmc_init(void)
{
	if (has_dbdma()) {
		/* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride
		 * of 8 bits.  And since devices are shared, we need to create
		 * our own to avoid freaking out other devices.
		 */
		memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);
		if (!memid)
			pr_err("au1xmmc: cannot add memory dbdma\n");
	}
	return platform_driver_register(&au1xmmc_driver);
}

static void __exit au1xmmc_exit(void)
{
	if (has_dbdma() && memid)
		au1xxx_ddma_del_device(memid);

	platform_driver_unregister(&au1xmmc_driver);
}

module_init(au1xmmc_init);
module_exit(au1xmmc_exit);

MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:au1xxx-mmc");