// SPDX-License-Identifier: GPL-2.0
/*
 * bcm2835 sdhost driver.
 *
 * The 2835 has two SD controllers: The Arasan sdhci controller
 * (supported by the iproc driver) and a custom sdhost controller
 * (supported by this driver).
 *
 * The sdhci controller supports both sdcard and sdio.  The sdhost
 * controller supports the sdcard only, but has better performance.
 * Also note that the rpi3 has sdio wifi, so driving the sdcard with
 * the sdhost controller leaves the sdhci controller free for wifi
 * support.
 *
 * The configuration is done by devicetree via pin muxing.  Both
 * SD controllers are available on the same pins (2 pin groups = pin 22
 * to 27 + pin 48 to 53).  So it's possible to use both SD controllers
 * at the same time with different pin groups.
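 *
 * For reference, a typical devicetree fragment routing the sdhost
 * controller to the SD card pins looks roughly like the sketch below.
 * This is illustrative only; the &sdhost and &sdhost_gpio48 labels are
 * the ones used by the Raspberry Pi dts files and may differ elsewhere:
 *
 *	&sdhost {
 *		pinctrl-names = "default";
 *		pinctrl-0 = <&sdhost_gpio48>;
 *		status = "okay";
 *	};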
 *
 * Author:      Phil Elwell <phil@raspberrypi.org>
 *              Copyright (C) 2015-2016 Raspberry Pi (Trading) Ltd.
 *
 * Based on
 *  mmc-bcm2835.c by Gellert Weisz
 * which is, in turn, based on
 *  sdhci-bcm2708.c by Broadcom
 *  sdhci-bcm2835.c by Stephen Warren and Oleksandr Tymoshenko
 *  sdhci.c and sdhci-pci.c by Pierre Ossman
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/time.h>
#include <linux/workqueue.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#define SDCMD  0x00 /* Command to SD card              - 16 R/W */
#define SDARG  0x04 /* Argument to SD card             - 32 R/W */
#define SDTOUT 0x08 /* Start value for timeout counter - 32 R/W */
#define SDCDIV 0x0c /* Start value for clock divider   - 11 R/W */
#define SDRSP0 0x10 /* SD card response (31:0)         - 32 R   */
#define SDRSP1 0x14 /* SD card response (63:32)        - 32 R   */
#define SDRSP2 0x18 /* SD card response (95:64)        - 32 R   */
#define SDRSP3 0x1c /* SD card response (127:96)       - 32 R   */
#define SDHSTS 0x20 /* SD host status                  - 11 R/W */
#define SDVDD  0x30 /* SD card power control           -  1 R/W */
#define SDEDM  0x34 /* Emergency Debug Mode            - 13 R/W */
#define SDHCFG 0x38 /* Host configuration              -  2 R/W */
#define SDHBCT 0x3c /* Host byte count (debug)         - 32 R/W */
#define SDDATA 0x40 /* Data to/from SD card            - 32 R/W */
#define SDHBLC 0x50 /* Host block count (SDIO/SDHC)    -  9 R/W */

#define SDCMD_NEW_FLAG			0x8000
#define SDCMD_FAIL_FLAG			0x4000
#define SDCMD_BUSYWAIT			0x800
#define SDCMD_NO_RESPONSE		0x400
#define SDCMD_LONG_RESPONSE		0x200
#define SDCMD_WRITE_CMD			0x80
#define SDCMD_READ_CMD			0x40
#define SDCMD_CMD_MASK			0x3f

#define SDCDIV_MAX_CDIV			0x7ff

#define SDHSTS_BUSY_IRPT		0x400
#define SDHSTS_BLOCK_IRPT		0x200
#define SDHSTS_SDIO_IRPT		0x100
#define SDHSTS_REW_TIME_OUT		0x80
#define SDHSTS_CMD_TIME_OUT		0x40
#define SDHSTS_CRC16_ERROR		0x20
#define SDHSTS_CRC7_ERROR		0x10
#define SDHSTS_FIFO_ERROR		0x08
/* Reserved */
/* Reserved */
#define SDHSTS_DATA_FLAG		0x01

#define SDHSTS_TRANSFER_ERROR_MASK	(SDHSTS_CRC7_ERROR | \
					 SDHSTS_CRC16_ERROR | \
					 SDHSTS_REW_TIME_OUT | \
					 SDHSTS_FIFO_ERROR)

#define SDHSTS_ERROR_MASK		(SDHSTS_CMD_TIME_OUT | \
					 SDHSTS_TRANSFER_ERROR_MASK)

#define SDHCFG_BUSY_IRPT_EN	BIT(10)
#define SDHCFG_BLOCK_IRPT_EN	BIT(8)
#define SDHCFG_SDIO_IRPT_EN	BIT(5)
#define SDHCFG_DATA_IRPT_EN	BIT(4)
#define SDHCFG_SLOW_CARD	BIT(3)
#define SDHCFG_WIDE_EXT_BUS	BIT(2)
#define SDHCFG_WIDE_INT_BUS	BIT(1)
#define SDHCFG_REL_CMD_LINE	BIT(0)

#define SDVDD_POWER_OFF		0
#define SDVDD_POWER_ON		1

#define SDEDM_FORCE_DATA_MODE	BIT(19)
#define SDEDM_CLOCK_PULSE	BIT(20)
#define SDEDM_BYPASS		BIT(21)

#define SDEDM_WRITE_THRESHOLD_SHIFT	9
#define SDEDM_READ_THRESHOLD_SHIFT	14
#define SDEDM_THRESHOLD_MASK		0x1f

#define SDEDM_FSM_MASK		0xf
#define SDEDM_FSM_IDENTMODE	0x0
#define SDEDM_FSM_DATAMODE	0x1
#define SDEDM_FSM_READDATA	0x2
#define SDEDM_FSM_WRITEDATA	0x3
#define SDEDM_FSM_READWAIT	0x4
#define SDEDM_FSM_READCRC	0x5
#define SDEDM_FSM_WRITECRC	0x6
#define SDEDM_FSM_WRITEWAIT1	0x7
#define SDEDM_FSM_POWERDOWN	0x8
#define SDEDM_FSM_POWERUP	0x9
#define SDEDM_FSM_WRITESTART1	0xa
#define SDEDM_FSM_WRITESTART2	0xb
#define SDEDM_FSM_GENPULSES	0xc
#define SDEDM_FSM_WRITEWAIT2	0xd
#define SDEDM_FSM_STARTPOWDOWN	0xf

#define SDDATA_FIFO_WORDS	16

#define FIFO_READ_THRESHOLD	4
#define FIFO_WRITE_THRESHOLD	4
#define SDDATA_FIFO_PIO_BURST	8

#define PIO_THRESHOLD	1  /* Maximum block count for PIO (0 = always DMA) */

struct bcm2835_host {
	spinlock_t		lock;
	struct mutex		mutex;

	void __iomem		*ioaddr;
	u32			phys_addr;

	struct platform_device	*pdev;

	int			clock;		/* Current clock speed */
	unsigned int		max_clk;	/* Max possible freq */
	struct work_struct	dma_work;
	struct delayed_work	timeout_work;	/* Timer for timeouts */
	struct sg_mapping_iter	sg_miter;	/* SG state for PIO */
	unsigned int		blocks;		/* remaining PIO blocks */
	int			irq;		/* Device IRQ */

	u32			ns_per_fifo_word;

	/* cached registers */
	u32			hcfg;
	u32			cdiv;

	struct mmc_request	*mrq;		/* Current request */
	struct mmc_command	*cmd;		/* Current command */
	struct mmc_data		*data;		/* Current data request */
	bool			data_complete:1;/* Data finished before cmd */
	bool			use_busy:1;	/* Wait for busy interrupt */
	bool			use_sbc:1;	/* Send CMD23 */

	/* for threaded irq handler */
	bool			irq_block;
	bool			irq_busy;
	bool			irq_data;

	/* DMA part */
	struct dma_chan		*dma_chan_rxtx;
	struct dma_chan		*dma_chan;
	struct dma_slave_config dma_cfg_rx;
	struct dma_slave_config dma_cfg_tx;
	struct dma_async_tx_descriptor	*dma_desc;
	u32			dma_dir;
	u32			drain_words;
	struct page		*drain_page;
	u32			drain_offset;
	bool			use_dma;
};

static void bcm2835_dumpcmd(struct bcm2835_host *host, struct mmc_command *cmd,
			    const char *label)
{
	struct device *dev = &host->pdev->dev;

	if (!cmd)
		return;

	dev_dbg(dev, "%c%s op %d arg 0x%x flags 0x%x - resp %08x %08x %08x %08x, err %d\n",
		(cmd == host->cmd) ? '>' : ' ',
		label, cmd->opcode, cmd->arg, cmd->flags,
		cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3],
		cmd->error);
}

static void bcm2835_dumpregs(struct bcm2835_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct device *dev = &host->pdev->dev;

	if (mrq) {
		bcm2835_dumpcmd(host, mrq->sbc, "sbc");
		bcm2835_dumpcmd(host, mrq->cmd, "cmd");
		if (mrq->data) {
			dev_dbg(dev, "data blocks %x blksz %x - err %d\n",
				mrq->data->blocks,
				mrq->data->blksz,
				mrq->data->error);
		}
		bcm2835_dumpcmd(host, mrq->stop, "stop");
	}

	dev_dbg(dev, "=========== REGISTER DUMP ===========\n");
	dev_dbg(dev, "SDCMD  0x%08x\n", readl(host->ioaddr + SDCMD));
	dev_dbg(dev, "SDARG  0x%08x\n", readl(host->ioaddr + SDARG));
	dev_dbg(dev, "SDTOUT 0x%08x\n", readl(host->ioaddr + SDTOUT));
	dev_dbg(dev, "SDCDIV 0x%08x\n", readl(host->ioaddr + SDCDIV));
	dev_dbg(dev, "SDRSP0 0x%08x\n", readl(host->ioaddr + SDRSP0));
	dev_dbg(dev, "SDRSP1 0x%08x\n", readl(host->ioaddr + SDRSP1));
	dev_dbg(dev, "SDRSP2 0x%08x\n", readl(host->ioaddr + SDRSP2));
	dev_dbg(dev, "SDRSP3 0x%08x\n", readl(host->ioaddr + SDRSP3));
	dev_dbg(dev, "SDHSTS 0x%08x\n", readl(host->ioaddr + SDHSTS));
	dev_dbg(dev, "SDVDD  0x%08x\n", readl(host->ioaddr + SDVDD));
	dev_dbg(dev, "SDEDM  0x%08x\n", readl(host->ioaddr + SDEDM));
	dev_dbg(dev, "SDHCFG 0x%08x\n", readl(host->ioaddr + SDHCFG));
	dev_dbg(dev, "SDHBCT 0x%08x\n", readl(host->ioaddr + SDHBCT));
	dev_dbg(dev, "SDHBLC 0x%08x\n", readl(host->ioaddr + SDHBLC));
	dev_dbg(dev, "===========================================\n");
}

static void bcm2835_reset_internal(struct bcm2835_host *host)
{
	u32 temp;

	writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD);
	writel(0, host->ioaddr + SDCMD);
	writel(0, host->ioaddr + SDARG);
	writel(0xf00000, host->ioaddr + SDTOUT);
	writel(0, host->ioaddr + SDCDIV);
	writel(0x7f8, host->ioaddr + SDHSTS); /* Write 1s to clear */
	writel(0, host->ioaddr + SDHCFG);
	writel(0, host->ioaddr + SDHBCT);
	writel(0, host->ioaddr + SDHBLC);

	/* Limit fifo usage due to silicon bug */
	temp = readl(host->ioaddr + SDEDM);
	temp &= ~((SDEDM_THRESHOLD_MASK << SDEDM_READ_THRESHOLD_SHIFT) |
		  (SDEDM_THRESHOLD_MASK << SDEDM_WRITE_THRESHOLD_SHIFT));
	temp |= (FIFO_READ_THRESHOLD << SDEDM_READ_THRESHOLD_SHIFT) |
		(FIFO_WRITE_THRESHOLD << SDEDM_WRITE_THRESHOLD_SHIFT);
	writel(temp, host->ioaddr + SDEDM);
	msleep(20);
	writel(SDVDD_POWER_ON, host->ioaddr + SDVDD);
	msleep(20);
	host->clock = 0;
	writel(host->hcfg, host->ioaddr + SDHCFG);
	writel(host->cdiv, host->ioaddr + SDCDIV);
}

static void bcm2835_reset(struct mmc_host *mmc)
{
	struct bcm2835_host *host = mmc_priv(mmc);

	if (host->dma_chan)
		dmaengine_terminate_sync(host->dma_chan);
	host->dma_chan = NULL;
	bcm2835_reset_internal(host);
}

static void bcm2835_finish_command(struct bcm2835_host *host);

static void bcm2835_wait_transfer_complete(struct bcm2835_host *host)
{
	int timediff;
	u32 alternate_idle;

	alternate_idle = (host->mrq->data->flags & MMC_DATA_READ) ?
		SDEDM_FSM_READWAIT : SDEDM_FSM_WRITESTART1;

	timediff = 0;

	while (1) {
		u32 edm, fsm;

		edm = readl(host->ioaddr + SDEDM);
		fsm = edm & SDEDM_FSM_MASK;

		if ((fsm == SDEDM_FSM_IDENTMODE) ||
		    (fsm == SDEDM_FSM_DATAMODE))
			break;
		if (fsm == alternate_idle) {
			writel(edm | SDEDM_FORCE_DATA_MODE,
			       host->ioaddr + SDEDM);
			break;
		}

		timediff++;
		if (timediff == 100000) {
			dev_err(&host->pdev->dev,
				"wait_transfer_complete - still waiting after %d retries\n",
				timediff);
			bcm2835_dumpregs(host);
			host->mrq->data->error = -ETIMEDOUT;
			return;
		}
		cpu_relax();
	}
}

static void bcm2835_dma_complete(void *param)
{
	struct bcm2835_host *host = param;

	schedule_work(&host->dma_work);
}

static void bcm2835_transfer_block_pio(struct bcm2835_host *host, bool is_read)
{
	size_t blksize;
	unsigned long wait_max;

	blksize = host->data->blksz;

	wait_max = jiffies + msecs_to_jiffies(500);

	while (blksize) {
		int copy_words;
		u32 hsts = 0;
		size_t len;
		u32 *buf;

		if (!sg_miter_next(&host->sg_miter)) {
			host->data->error = -EINVAL;
			break;
		}

		len = min(host->sg_miter.length, blksize);
		if (len % 4) {
			host->data->error = -EINVAL;
			break;
		}

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = (u32 *)host->sg_miter.addr;

		copy_words = len / 4;

		while (copy_words) {
			int burst_words, words;
			u32 edm;

			burst_words = min(SDDATA_FIFO_PIO_BURST, copy_words);
			edm = readl(host->ioaddr + SDEDM);
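			/* SDEDM bits 8:4 report the current FIFO fill
			 * level in words: for reads this is how many
			 * words can be fetched, for writes the free
			 * space is the FIFO size minus the fill level.
			 */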
			if (is_read)
				words = ((edm >> 4) & 0x1f);
			else
				words = SDDATA_FIFO_WORDS - ((edm >> 4) & 0x1f);

			if (words < burst_words) {
				int fsm_state = (edm & SDEDM_FSM_MASK);
				struct device *dev = &host->pdev->dev;

				if ((is_read &&
				     (fsm_state != SDEDM_FSM_READDATA &&
				      fsm_state != SDEDM_FSM_READWAIT &&
				      fsm_state != SDEDM_FSM_READCRC)) ||
				    (!is_read &&
				     (fsm_state != SDEDM_FSM_WRITEDATA &&
				      fsm_state != SDEDM_FSM_WRITESTART1 &&
				      fsm_state != SDEDM_FSM_WRITESTART2))) {
					hsts = readl(host->ioaddr + SDHSTS);
					dev_err(dev, "fsm %x, hsts %08x\n",
						fsm_state, hsts);
					if (hsts & SDHSTS_ERROR_MASK)
						break;
				}

				if (time_after(jiffies, wait_max)) {
					dev_err(dev, "PIO %s timeout - EDM %08x\n",
						is_read ? "read" : "write",
						edm);
					hsts = SDHSTS_REW_TIME_OUT;
					break;
				}
				ndelay((burst_words - words) *
				       host->ns_per_fifo_word);
				continue;
			} else if (words > copy_words) {
				words = copy_words;
			}

			copy_words -= words;

			while (words) {
				if (is_read)
					*(buf++) = readl(host->ioaddr + SDDATA);
				else
					writel(*(buf++), host->ioaddr + SDDATA);
				words--;
			}
		}

		if (hsts & SDHSTS_ERROR_MASK)
			break;
	}

	sg_miter_stop(&host->sg_miter);
}

static void bcm2835_transfer_pio(struct bcm2835_host *host)
{
	struct device *dev = &host->pdev->dev;
	u32 sdhsts;
	bool is_read;

	is_read = (host->data->flags & MMC_DATA_READ) != 0;
	bcm2835_transfer_block_pio(host, is_read);

	sdhsts = readl(host->ioaddr + SDHSTS);
	if (sdhsts & (SDHSTS_CRC16_ERROR |
		      SDHSTS_CRC7_ERROR |
		      SDHSTS_FIFO_ERROR)) {
		dev_err(dev, "%s transfer error - HSTS %08x\n",
			is_read ? "read" : "write", sdhsts);
		host->data->error = -EILSEQ;
	} else if ((sdhsts & (SDHSTS_CMD_TIME_OUT |
			      SDHSTS_REW_TIME_OUT))) {
		dev_err(dev, "%s timeout error - HSTS %08x\n",
			is_read ? "read" : "write", sdhsts);
		host->data->error = -ETIMEDOUT;
	}
}

static
void bcm2835_prepare_dma(struct bcm2835_host *host, struct mmc_data *data)
{
	int sg_len, dir_data, dir_slave;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *dma_chan;

	dma_chan = host->dma_chan_rxtx;
	if (data->flags & MMC_DATA_READ) {
		dir_data = DMA_FROM_DEVICE;
		dir_slave = DMA_DEV_TO_MEM;
	} else {
		dir_data = DMA_TO_DEVICE;
		dir_slave = DMA_MEM_TO_DEV;
	}

	/* The block doesn't manage the FIFO DREQs properly for
	 * multi-block transfers, so don't attempt to DMA the final
	 * few words.  Unfortunately this requires the final sg entry
	 * to be trimmed.  N.B. This code demands that the overspill
	 * is contained in a single sg entry.
	 */
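
	/* With FIFO_READ_THRESHOLD of 4, at most (4 - 1) * 4 = 12 bytes
	 * (three words) are held back from the DMA transfer here and
	 * later drained by the CPU in bcm2835_dma_complete_work().
	 */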

	host->drain_words = 0;
	if ((data->blocks > 1) && (dir_data == DMA_FROM_DEVICE)) {
		struct scatterlist *sg;
		u32 len;
		int i;

		len = min((u32)(FIFO_READ_THRESHOLD - 1) * 4,
			  (u32)data->blocks * data->blksz);

		for_each_sg(data->sg, sg, data->sg_len, i) {
			if (sg_is_last(sg)) {
				WARN_ON(sg->length < len);
				sg->length -= len;
				host->drain_page = sg_page(sg);
				host->drain_offset = sg->offset + sg->length;
			}
		}
		host->drain_words = len / 4;
	}

	/* The parameters have already been validated, so this will not fail */
	(void)dmaengine_slave_config(dma_chan,
				     (dir_data == DMA_FROM_DEVICE) ?
				     &host->dma_cfg_rx :
				     &host->dma_cfg_tx);

	sg_len = dma_map_sg(dma_chan->device->dev, data->sg, data->sg_len,
			    dir_data);
	if (!sg_len)
		return;

	desc = dmaengine_prep_slave_sg(dma_chan, data->sg, sg_len, dir_slave,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		dma_unmap_sg(dma_chan->device->dev, data->sg, sg_len, dir_data);
		return;
	}

	desc->callback = bcm2835_dma_complete;
	desc->callback_param = host;
	host->dma_desc = desc;
	host->dma_chan = dma_chan;
	host->dma_dir = dir_data;
}

static void bcm2835_start_dma(struct bcm2835_host *host)
{
	dmaengine_submit(host->dma_desc);
	dma_async_issue_pending(host->dma_chan);
}

static void bcm2835_set_transfer_irqs(struct bcm2835_host *host)
{
	u32 all_irqs = SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN |
		SDHCFG_BUSY_IRPT_EN;

	if (host->dma_desc) {
		host->hcfg = (host->hcfg & ~all_irqs) |
			SDHCFG_BUSY_IRPT_EN;
	} else {
		host->hcfg = (host->hcfg & ~all_irqs) |
			SDHCFG_DATA_IRPT_EN |
			SDHCFG_BUSY_IRPT_EN;
	}

	writel(host->hcfg, host->ioaddr + SDHCFG);
}

static
void bcm2835_prepare_data(struct bcm2835_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	WARN_ON(host->data);

	host->data = data;
	if (!data)
		return;

	host->data_complete = false;
	host->data->bytes_xfered = 0;

	if (!host->dma_desc) {
		/* Use PIO */
		int flags = SG_MITER_ATOMIC;

		if (data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	bcm2835_set_transfer_irqs(host);

	writel(data->blksz, host->ioaddr + SDHBCT);
	writel(data->blocks, host->ioaddr + SDHBLC);
}

static u32 bcm2835_read_wait_sdcmd(struct bcm2835_host *host, u32 max_ms)
{
	struct device *dev = &host->pdev->dev;
	u32 value;
	int ret;

	ret = readl_poll_timeout(host->ioaddr + SDCMD, value,
				 !(value & SDCMD_NEW_FLAG), 1, 10);
	if (ret == -ETIMEDOUT)
		/* if it takes a while make poll interval bigger */
		ret = readl_poll_timeout(host->ioaddr + SDCMD, value,
					 !(value & SDCMD_NEW_FLAG),
					 10, max_ms * 1000);
	if (ret == -ETIMEDOUT)
		dev_err(dev, "%s: timeout (%d ms)\n", __func__, max_ms);

	return value;
}

static void bcm2835_finish_request(struct bcm2835_host *host)
{
	struct dma_chan *terminate_chan = NULL;
	struct mmc_request *mrq;

	cancel_delayed_work(&host->timeout_work);

	mrq = host->mrq;

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

	host->dma_desc = NULL;
	terminate_chan = host->dma_chan;
	host->dma_chan = NULL;

	if (terminate_chan) {
		int err = dmaengine_terminate_all(terminate_chan);

		if (err)
			dev_err(&host->pdev->dev,
				"failed to terminate DMA (%d)\n", err);
	}

	mmc_request_done(mmc_from_priv(host), mrq);
}

static
bool bcm2835_send_command(struct bcm2835_host *host, struct mmc_command *cmd)
{
	struct device *dev = &host->pdev->dev;
	u32 sdcmd, sdhsts;
	unsigned long timeout;

	WARN_ON(host->cmd);

	sdcmd = bcm2835_read_wait_sdcmd(host, 100);
	if (sdcmd & SDCMD_NEW_FLAG) {
		dev_err(dev, "previous command never completed.\n");
		bcm2835_dumpregs(host);
		cmd->error = -EILSEQ;
		bcm2835_finish_request(host);
		return false;
	}

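	/* For commands with a long busy wait, scale the software timeout
	 * from the card's busy_timeout (in ms) to jiffies with a second of
	 * margin; data commands and short busy waits get a fixed 10 seconds.
	 */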
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout = DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout = 10 * HZ;
	schedule_delayed_work(&host->timeout_work, timeout);

	host->cmd = cmd;

	/* Clear any error flags */
	sdhsts = readl(host->ioaddr + SDHSTS);
	if (sdhsts & SDHSTS_ERROR_MASK)
		writel(sdhsts, host->ioaddr + SDHSTS);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		dev_err(dev, "unsupported response type!\n");
		cmd->error = -EINVAL;
		bcm2835_finish_request(host);
		return false;
	}

	bcm2835_prepare_data(host, cmd);

	writel(cmd->arg, host->ioaddr + SDARG);

	sdcmd = cmd->opcode & SDCMD_CMD_MASK;

	host->use_busy = false;
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		sdcmd |= SDCMD_NO_RESPONSE;
	} else {
		if (cmd->flags & MMC_RSP_136)
			sdcmd |= SDCMD_LONG_RESPONSE;
		if (cmd->flags & MMC_RSP_BUSY) {
			sdcmd |= SDCMD_BUSYWAIT;
			host->use_busy = true;
		}
	}

	if (cmd->data) {
		if (cmd->data->flags & MMC_DATA_WRITE)
			sdcmd |= SDCMD_WRITE_CMD;
		if (cmd->data->flags & MMC_DATA_READ)
			sdcmd |= SDCMD_READ_CMD;
	}

	writel(sdcmd | SDCMD_NEW_FLAG, host->ioaddr + SDCMD);

	return true;
}

static void bcm2835_transfer_complete(struct bcm2835_host *host)
{
	struct mmc_data *data;

	WARN_ON(!host->data_complete);

	data = host->data;
	host->data = NULL;

	/* Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (host->mrq->stop && (data->error || !host->use_sbc)) {
		if (bcm2835_send_command(host, host->mrq->stop)) {
			/* No busy, so poll for completion */
			if (!host->use_busy)
				bcm2835_finish_command(host);
		}
	} else {
		bcm2835_wait_transfer_complete(host);
		bcm2835_finish_request(host);
	}
}

static void bcm2835_finish_data(struct bcm2835_host *host)
{
	struct device *dev = &host->pdev->dev;
	struct mmc_data *data;

	data = host->data;

	host->hcfg &= ~(SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN);
	writel(host->hcfg, host->ioaddr + SDHCFG);

	data->bytes_xfered = data->error ? 0 : (data->blksz * data->blocks);

	host->data_complete = true;

	if (host->cmd) {
		/* Data managed to finish before the
		 * command completed. Make sure we do
		 * things in the proper order.
		 */
		dev_dbg(dev, "Finished early - HSTS %08x\n",
			readl(host->ioaddr + SDHSTS));
	} else {
		bcm2835_transfer_complete(host);
	}
}

static void bcm2835_finish_command(struct bcm2835_host *host)
{
	struct device *dev = &host->pdev->dev;
	struct mmc_command *cmd = host->cmd;
	u32 sdcmd;

	sdcmd = bcm2835_read_wait_sdcmd(host, 100);

	/* Check for errors */
	if (sdcmd & SDCMD_NEW_FLAG) {
		dev_err(dev, "command never completed.\n");
		bcm2835_dumpregs(host);
		host->cmd->error = -EIO;
		bcm2835_finish_request(host);
		return;
	} else if (sdcmd & SDCMD_FAIL_FLAG) {
		u32 sdhsts = readl(host->ioaddr + SDHSTS);

		/* Clear the errors */
		writel(SDHSTS_ERROR_MASK, host->ioaddr + SDHSTS);

		if (!(sdhsts & SDHSTS_CRC7_ERROR) ||
		    (host->cmd->opcode != MMC_SEND_OP_COND)) {
			u32 edm, fsm;

			if (sdhsts & SDHSTS_CMD_TIME_OUT) {
				host->cmd->error = -ETIMEDOUT;
			} else {
				dev_err(dev, "unexpected command %d error\n",
					host->cmd->opcode);
				bcm2835_dumpregs(host);
				host->cmd->error = -EILSEQ;
			}
			edm = readl(host->ioaddr + SDEDM);
			fsm = edm & SDEDM_FSM_MASK;
			if (fsm == SDEDM_FSM_READWAIT ||
			    fsm == SDEDM_FSM_WRITESTART1)
				/* Kick the FSM out of its wait */
				writel(edm | SDEDM_FORCE_DATA_MODE,
				       host->ioaddr + SDEDM);
			bcm2835_finish_request(host);
			return;
		}
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			int i;

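			/* SDRSP0..SDRSP3 hold bits 31:0 up to 127:96 of a
			 * long response, while resp[0] must end up with the
			 * most significant word, so fill the array in
			 * reverse order.
			 */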
			for (i = 0; i < 4; i++) {
				cmd->resp[3 - i] =
					readl(host->ioaddr + SDRSP0 + i * 4);
			}
		} else {
			cmd->resp[0] = readl(host->ioaddr + SDRSP0);
		}
	}

	if (cmd == host->mrq->sbc) {
		/* Finished CMD23, now send actual command. */
		host->cmd = NULL;
		if (bcm2835_send_command(host, host->mrq->cmd)) {
			if (host->data && host->dma_desc)
				/* DMA transfer starts now, PIO starts
				 * after irq
				 */
				bcm2835_start_dma(host);

			if (!host->use_busy)
				bcm2835_finish_command(host);
		}
	} else if (cmd == host->mrq->stop) {
		/* Finished CMD12 */
		bcm2835_finish_request(host);
	} else {
		/* Processed actual command. */
		host->cmd = NULL;
		if (!host->data)
			bcm2835_finish_request(host);
		else if (host->data_complete)
			bcm2835_transfer_complete(host);
	}
}

static void bcm2835_timeout(struct work_struct *work)
{
	struct delayed_work *d = to_delayed_work(work);
	struct bcm2835_host *host =
		container_of(d, struct bcm2835_host, timeout_work);
	struct device *dev = &host->pdev->dev;

	mutex_lock(&host->mutex);

	if (host->mrq) {
		dev_err(dev, "timeout waiting for hardware interrupt.\n");
		bcm2835_dumpregs(host);

		bcm2835_reset(mmc_from_priv(host));

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			bcm2835_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			bcm2835_finish_request(host);
		}
	}

	mutex_unlock(&host->mutex);
}

static bool bcm2835_check_cmd_error(struct bcm2835_host *host, u32 intmask)
{
	struct device *dev = &host->pdev->dev;

	if (!(intmask & SDHSTS_ERROR_MASK))
		return false;

	if (!host->cmd)
		return true;

	dev_err(dev, "sdhost_busy_irq: intmask %08x\n", intmask);
	if (intmask & SDHSTS_CRC7_ERROR) {
		host->cmd->error = -EILSEQ;
	} else if (intmask & (SDHSTS_CRC16_ERROR |
			      SDHSTS_FIFO_ERROR)) {
		if (host->mrq->data)
			host->mrq->data->error = -EILSEQ;
		else
			host->cmd->error = -EILSEQ;
	} else if (intmask & SDHSTS_REW_TIME_OUT) {
		if (host->mrq->data)
			host->mrq->data->error = -ETIMEDOUT;
		else
			host->cmd->error = -ETIMEDOUT;
	} else if (intmask & SDHSTS_CMD_TIME_OUT) {
		host->cmd->error = -ETIMEDOUT;
	}
	bcm2835_dumpregs(host);
	return true;
}

static void bcm2835_check_data_error(struct bcm2835_host *host, u32 intmask)
{
	if (!host->data)
		return;
	if (intmask & (SDHSTS_CRC16_ERROR | SDHSTS_FIFO_ERROR))
		host->data->error = -EILSEQ;
	if (intmask & SDHSTS_REW_TIME_OUT)
		host->data->error = -ETIMEDOUT;
}

static void bcm2835_busy_irq(struct bcm2835_host *host)
{
	if (WARN_ON(!host->cmd)) {
		bcm2835_dumpregs(host);
		return;
	}

	if (WARN_ON(!host->use_busy)) {
		bcm2835_dumpregs(host);
		return;
	}
	host->use_busy = false;

	bcm2835_finish_command(host);
}

static void bcm2835_data_irq(struct bcm2835_host *host, u32 intmask)
{
	/* There are no dedicated data/space available interrupt
	 * status bits, so it is necessary to use the single shared
	 * data/space available FIFO status bits. It is therefore not
	 * an error to get here when there is no data transfer in
	 * progress.
	 */
	if (!host->data)
		return;

	bcm2835_check_data_error(host, intmask);
	if (host->data->error)
		goto finished;

	if (host->data->flags & MMC_DATA_WRITE) {
		/* Use the block interrupt for writes after the first block */
		host->hcfg &= ~(SDHCFG_DATA_IRPT_EN);
		host->hcfg |= SDHCFG_BLOCK_IRPT_EN;
		writel(host->hcfg, host->ioaddr + SDHCFG);
		bcm2835_transfer_pio(host);
	} else {
		bcm2835_transfer_pio(host);
		host->blocks--;
		if ((host->blocks == 0) || host->data->error)
			goto finished;
	}
	return;

finished:
	host->hcfg &= ~(SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN);
	writel(host->hcfg, host->ioaddr + SDHCFG);
}

static void bcm2835_data_threaded_irq(struct bcm2835_host *host)
{
	if (!host->data)
		return;
	if ((host->blocks == 0) || host->data->error)
		bcm2835_finish_data(host);
}

static void bcm2835_block_irq(struct bcm2835_host *host)
{
	if (WARN_ON(!host->data)) {
		bcm2835_dumpregs(host);
		return;
	}

	if (!host->dma_desc) {
		WARN_ON(!host->blocks);
		if (host->data->error || (--host->blocks == 0))
			bcm2835_finish_data(host);
		else
			bcm2835_transfer_pio(host);
	} else if (host->data->flags & MMC_DATA_WRITE) {
		bcm2835_finish_data(host);
	}
}

static irqreturn_t bcm2835_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct bcm2835_host *host = dev_id;
	u32 intmask;

	spin_lock(&host->lock);

	intmask = readl(host->ioaddr + SDHSTS);

	writel(SDHSTS_BUSY_IRPT |
	       SDHSTS_BLOCK_IRPT |
	       SDHSTS_SDIO_IRPT |
	       SDHSTS_DATA_FLAG,
	       host->ioaddr + SDHSTS);

	if (intmask & SDHSTS_BLOCK_IRPT) {
		bcm2835_check_data_error(host, intmask);
		host->irq_block = true;
		result = IRQ_WAKE_THREAD;
	}

	if (intmask & SDHSTS_BUSY_IRPT) {
		if (!bcm2835_check_cmd_error(host, intmask)) {
			host->irq_busy = true;
			result = IRQ_WAKE_THREAD;
		} else {
			result = IRQ_HANDLED;
		}
	}

	/* There is no true data interrupt status bit, so it is
	 * necessary to qualify the data flag with the interrupt
	 * enable bit.
	 */
	if ((intmask & SDHSTS_DATA_FLAG) &&
	    (host->hcfg & SDHCFG_DATA_IRPT_EN)) {
		bcm2835_data_irq(host, intmask);
		host->irq_data = true;
		result = IRQ_WAKE_THREAD;
	}

	spin_unlock(&host->lock);

	return result;
}

static irqreturn_t bcm2835_threaded_irq(int irq, void *dev_id)
{
	struct bcm2835_host *host = dev_id;
	unsigned long flags;
	bool block, busy, data;

	spin_lock_irqsave(&host->lock, flags);

	block = host->irq_block;
	busy  = host->irq_busy;
	data  = host->irq_data;
	host->irq_block = false;
	host->irq_busy  = false;
	host->irq_data  = false;

	spin_unlock_irqrestore(&host->lock, flags);

	mutex_lock(&host->mutex);

	if (block)
		bcm2835_block_irq(host);
	if (busy)
		bcm2835_busy_irq(host);
	if (data)
		bcm2835_data_threaded_irq(host);

	mutex_unlock(&host->mutex);

	return IRQ_HANDLED;
}

static void bcm2835_dma_complete_work(struct work_struct *work)
{
	struct bcm2835_host *host =
		container_of(work, struct bcm2835_host, dma_work);
	struct mmc_data *data;

	mutex_lock(&host->mutex);

	data = host->data;

	if (host->dma_chan) {
		dma_unmap_sg(host->dma_chan->device->dev,
			     data->sg, data->sg_len,
			     host->dma_dir);

		host->dma_chan = NULL;
	}

	if (host->drain_words) {
		void *page;
		u32 *buf;

		if (host->drain_offset & PAGE_MASK) {
			host->drain_page += host->drain_offset >> PAGE_SHIFT;
			host->drain_offset &= ~PAGE_MASK;
		}
		page = kmap_local_page(host->drain_page);
		buf = page + host->drain_offset;

		while (host->drain_words) {
			u32 edm = readl(host->ioaddr + SDEDM);

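			/* Only pop a word from SDDATA when the SDEDM fill
			 * level (bits 8:4) shows data is present.
			 */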
			if ((edm >> 4) & 0x1f)
				*(buf++) = readl(host->ioaddr + SDDATA);
			host->drain_words--;
		}

		kunmap_local(page);
	}

	bcm2835_finish_data(host);

	mutex_unlock(&host->mutex);
}

static void bcm2835_set_clock(struct bcm2835_host *host, unsigned int clock)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	int div;

	/* The SDCDIV register has 11 bits, and holds (div - 2).  But
	 * in data mode the max is 50MHz without a minimum, and only
	 * the bottom 3 bits are used. Since the switch over is
	 * automatic (unless we have marked the card as slow...),
	 * chosen values have to make sense in both modes.  Ident mode
	 * must be 100-400KHz, so we can range-check the requested
	 * clock. CMD15 must be used to return to data mode, so this
	 * can be monitored.
	 *
	 * clock 250MHz -> 0->125MHz, 1->83.3MHz, 2->62.5MHz, 3->50.0MHz
	 *                 4->41.7MHz, 5->35.7MHz, 6->31.3MHz, 7->27.8MHz
	 *
	 *		 623->400KHz/27.8MHz
	 *		 reset value (507)->491159Hz/50MHz
	 *
	 * BUT, the 3-bit clock divisor in data mode is too small if
	 * the core clock is higher than 250MHz, so instead use the
	 * SLOW_CARD configuration bit to force the use of the ident
	 * clock divisor at all times.
	 */
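
	/* Worked example (assuming a 250MHz core clock): a request for
	 * 50MHz gives div = 250000000 / 50000000 = 5, the correction
	 * check leaves it unchanged, and subtracting 2 stores 3 in
	 * SDCDIV, for an actual clock of 250MHz / (3 + 2) = 50MHz.
	 */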

	if (clock < 100000) {
		/* Can't stop the clock, but make it as slow as possible
		 * to show willing
		 */
		host->cdiv = SDCDIV_MAX_CDIV;
		writel(host->cdiv, host->ioaddr + SDCDIV);
		return;
	}

	div = host->max_clk / clock;
	if (div < 2)
		div = 2;
	if ((host->max_clk / div) > clock)
		div++;
	div -= 2;

	if (div > SDCDIV_MAX_CDIV)
		div = SDCDIV_MAX_CDIV;

	clock = host->max_clk / (div + 2);
	mmc->actual_clock = clock;

	/* Calibrate some delays */

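	/* A 32-bit FIFO word takes 8 clocks on a 4-bit bus and 32 clocks
	 * on a 1-bit bus, so multiply the clock period (in ns) accordingly.
	 */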
	host->ns_per_fifo_word = (1000000000 / clock) *
		((mmc->caps & MMC_CAP_4_BIT_DATA) ? 8 : 32);

	host->cdiv = div;
	writel(host->cdiv, host->ioaddr + SDCDIV);

	/* Set the timeout to 500ms */
	writel(mmc->actual_clock / 2, host->ioaddr + SDTOUT);
}

static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct bcm2835_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	u32 edm, fsm;

	/* Reset the error statuses in case this is a retry */
	if (mrq->sbc)
		mrq->sbc->error = 0;
	if (mrq->cmd)
		mrq->cmd->error = 0;
	if (mrq->data)
		mrq->data->error = 0;
	if (mrq->stop)
		mrq->stop->error = 0;

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(dev, "unsupported block size (%d bytes)\n",
			mrq->data->blksz);

		if (mrq->cmd)
			mrq->cmd->error = -EINVAL;

		mmc_request_done(mmc, mrq);
		return;
	}

	mutex_lock(&host->mutex);

	WARN_ON(host->mrq);
	host->mrq = mrq;

	edm = readl(host->ioaddr + SDEDM);
	fsm = edm & SDEDM_FSM_MASK;

	if ((fsm != SDEDM_FSM_IDENTMODE) &&
	    (fsm != SDEDM_FSM_DATAMODE)) {
		dev_err(dev, "previous command (%d) not complete (EDM %08x)\n",
			readl(host->ioaddr + SDCMD) & SDCMD_CMD_MASK,
			edm);
		bcm2835_dumpregs(host);

		if (mrq->cmd)
			mrq->cmd->error = -EILSEQ;

		bcm2835_finish_request(host);
		mutex_unlock(&host->mutex);
		return;
	}

	if (host->use_dma && mrq->data && (mrq->data->blocks > PIO_THRESHOLD))
		bcm2835_prepare_dma(host, mrq->data);

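	/* In this driver CMD23 (SET_BLOCK_COUNT) is only issued for read
	 * transfers; multi-block writes are terminated with the stop
	 * command (CMD12) instead.
	 */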
	host->use_sbc = !!mrq->sbc && host->mrq->data &&
			(host->mrq->data->flags & MMC_DATA_READ);
	if (host->use_sbc) {
		if (bcm2835_send_command(host, mrq->sbc)) {
			if (!host->use_busy)
				bcm2835_finish_command(host);
		}
	} else if (mrq->cmd && bcm2835_send_command(host, mrq->cmd)) {
		if (host->data && host->dma_desc) {
			/* DMA transfer starts now, PIO starts after irq */
			bcm2835_start_dma(host);
		}

		if (!host->use_busy)
			bcm2835_finish_command(host);
	}

	mutex_unlock(&host->mutex);
}

static void bcm2835_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct bcm2835_host *host = mmc_priv(mmc);

	mutex_lock(&host->mutex);

	if (!ios->clock || ios->clock != host->clock) {
		bcm2835_set_clock(host, ios->clock);
		host->clock = ios->clock;
	}

	/* set bus width */
	host->hcfg &= ~SDHCFG_WIDE_EXT_BUS;
	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->hcfg |= SDHCFG_WIDE_EXT_BUS;

	host->hcfg |= SDHCFG_WIDE_INT_BUS;

	/* Disable clever clock switching, to cope with fast core clocks */
	host->hcfg |= SDHCFG_SLOW_CARD;

	writel(host->hcfg, host->ioaddr + SDHCFG);

	mutex_unlock(&host->mutex);
}

static const struct mmc_host_ops bcm2835_ops = {
	.request = bcm2835_request,
	.set_ios = bcm2835_set_ios,
	.card_hw_reset = bcm2835_reset,
};

static int bcm2835_add_host(struct bcm2835_host *host)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	struct device *dev = &host->pdev->dev;
	char pio_limit_string[20];
	int ret;

	if (!mmc->f_max || mmc->f_max > host->max_clk)
		mmc->f_max = host->max_clk;
	mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV;

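	/* SDTOUT is a 32-bit counter of SD clock cycles (see
	 * bcm2835_set_clock()), so the longest representable busy timeout
	 * in ms is 0xffffffff divided by the clock rate in kHz.
	 */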
	mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000);

	dev_dbg(dev, "f_max %d, f_min %d, max_busy_timeout %d\n",
		mmc->f_max, mmc->f_min, mmc->max_busy_timeout);

	/* host controller capabilities */
	mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
		     MMC_CAP_NEEDS_POLL | MMC_CAP_HW_RESET | MMC_CAP_CMD23;

	spin_lock_init(&host->lock);
	mutex_init(&host->mutex);

	if (!host->dma_chan_rxtx) {
		dev_warn(dev, "unable to initialise DMA channel. Falling back to PIO\n");
		host->use_dma = false;
	} else {
		host->use_dma = true;

		host->dma_cfg_tx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		host->dma_cfg_tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		host->dma_cfg_tx.direction = DMA_MEM_TO_DEV;
		host->dma_cfg_tx.src_addr = 0;
		host->dma_cfg_tx.dst_addr = host->phys_addr + SDDATA;

		host->dma_cfg_rx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		host->dma_cfg_rx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		host->dma_cfg_rx.direction = DMA_DEV_TO_MEM;
		host->dma_cfg_rx.src_addr = host->phys_addr + SDDATA;
		host->dma_cfg_rx.dst_addr = 0;

		if (dmaengine_slave_config(host->dma_chan_rxtx,
					   &host->dma_cfg_tx) != 0 ||
		    dmaengine_slave_config(host->dma_chan_rxtx,
					   &host->dma_cfg_rx) != 0)
			host->use_dma = false;
	}

	mmc->max_segs = 128;
	mmc->max_req_size = min_t(size_t, 524288, dma_max_mapping_size(dev));
	mmc->max_seg_size = mmc->max_req_size;
	mmc->max_blk_size = 1024;
	mmc->max_blk_count =  65535;

	/* report supported voltage ranges */
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	INIT_WORK(&host->dma_work, bcm2835_dma_complete_work);
	INIT_DELAYED_WORK(&host->timeout_work, bcm2835_timeout);

	/* Set interrupt enables */
	host->hcfg = SDHCFG_BUSY_IRPT_EN;

	bcm2835_reset_internal(host);

	ret = request_threaded_irq(host->irq, bcm2835_irq,
				   bcm2835_threaded_irq,
				   0, mmc_hostname(mmc), host);
	if (ret) {
		dev_err(dev, "failed to request IRQ %d: %d\n", host->irq, ret);
		return ret;
	}

	ret = mmc_add_host(mmc);
	if (ret) {
		free_irq(host->irq, host);
		return ret;
	}

	pio_limit_string[0] = '\0';
	if (host->use_dma && (PIO_THRESHOLD > 0))
		sprintf(pio_limit_string, " (>%d)", PIO_THRESHOLD);
	dev_info(dev, "loaded - DMA %s%s\n",
		 host->use_dma ? "enabled" : "disabled", pio_limit_string);

	return 0;
}

static int bcm2835_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;
	struct bcm2835_host *host;
	struct mmc_host *mmc;
	const __be32 *regaddr_p;
	int ret;

	dev_dbg(dev, "%s\n", __func__);
	mmc = mmc_alloc_host(sizeof(*host), dev);
	if (!mmc)
		return -ENOMEM;

	mmc->ops = &bcm2835_ops;
	host = mmc_priv(mmc);
	host->pdev = pdev;
	spin_lock_init(&host->lock);

	host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(host->ioaddr)) {
		ret = PTR_ERR(host->ioaddr);
		goto err;
	}

	/* Parse OF address directly to get the physical address for
	 * DMA to our registers.
	 */
	regaddr_p = of_get_address(pdev->dev.of_node, 0, NULL, NULL);
	if (!regaddr_p) {
		dev_err(dev, "Can't get phys address\n");
		ret = -EINVAL;
		goto err;
	}

	host->phys_addr = be32_to_cpup(regaddr_p);

	host->dma_chan = NULL;
	host->dma_desc = NULL;

	host->dma_chan_rxtx = dma_request_chan(dev, "rx-tx");
	if (IS_ERR(host->dma_chan_rxtx)) {
		ret = PTR_ERR(host->dma_chan_rxtx);
		host->dma_chan_rxtx = NULL;

		if (ret == -EPROBE_DEFER)
			goto err;

		/* Ignore errors to fall back to PIO mode */
	}

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		ret = dev_err_probe(dev, PTR_ERR(clk), "could not get clk\n");
		goto err;
	}

	host->max_clk = clk_get_rate(clk);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		ret = host->irq;
		goto err;
	}

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err;

	ret = bcm2835_add_host(host);
	if (ret)
		goto err;

	platform_set_drvdata(pdev, host);

	dev_dbg(dev, "%s -> OK\n", __func__);

	return 0;

err:
	dev_dbg(dev, "%s -> err %d\n", __func__, ret);
	if (host->dma_chan_rxtx)
		dma_release_channel(host->dma_chan_rxtx);
	mmc_free_host(mmc);

	return ret;
}

static void bcm2835_remove(struct platform_device *pdev)
{
	struct bcm2835_host *host = platform_get_drvdata(pdev);
	struct mmc_host *mmc = mmc_from_priv(host);

	mmc_remove_host(mmc);

	writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD);

	free_irq(host->irq, host);

	cancel_work_sync(&host->dma_work);
	cancel_delayed_work_sync(&host->timeout_work);

	if (host->dma_chan_rxtx)
		dma_release_channel(host->dma_chan_rxtx);

	mmc_free_host(mmc);
}

static const struct of_device_id bcm2835_match[] = {
	{ .compatible = "brcm,bcm2835-sdhost" },
	{ }
};
MODULE_DEVICE_TABLE(of, bcm2835_match);

static struct platform_driver bcm2835_driver = {
	.probe      = bcm2835_probe,
	.remove_new = bcm2835_remove,
	.driver     = {
		.name		= "sdhost-bcm2835",
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table	= bcm2835_match,
	},
};
module_platform_driver(bcm2835_driver);

MODULE_ALIAS("platform:sdhost-bcm2835");
MODULE_DESCRIPTION("BCM2835 SDHost driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Phil Elwell");