• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/mmc/host/
1/*
2 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
3 *
4 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * Thanks to the following companies for their support:
12 *
13 *     - JMicron (hardware and technical support)
14 */
15
16#include <linux/delay.h>
17#include <linux/highmem.h>
18#include <linux/io.h>
19#include <linux/dma-mapping.h>
20#include <linux/slab.h>
21#include <linux/scatterlist.h>
22#include <linux/regulator/consumer.h>
23
24#include <linux/leds.h>
25
26#include <linux/mmc/host.h>
27
28#include "sdhci.h"
29
30#define DRIVER_NAME "sdhci"
31
32#define DBG(f, x...) \
33	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
34
35#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
36	defined(CONFIG_MMC_SDHCI_MODULE))
37#define SDHCI_USE_LEDS_CLASS
38#endif
39
40static unsigned int debug_quirks = 0;
41
42static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
43static void sdhci_finish_data(struct sdhci_host *);
44
45static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
46static void sdhci_finish_command(struct sdhci_host *);
47
/*
 * Dump the interesting host controller registers to the kernel log.
 * Pure debug aid: called from error paths (reset / clock-stabilise /
 * command-inhibit timeouts) to leave a snapshot of controller state.
 */
static void sdhci_dumpregs(struct sdhci_host *host)
{
	printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");

	printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
		sdhci_readl(host, SDHCI_DMA_ADDRESS),
		sdhci_readw(host, SDHCI_HOST_VERSION));
	printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
		sdhci_readw(host, SDHCI_BLOCK_SIZE),
		sdhci_readw(host, SDHCI_BLOCK_COUNT));
	printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		sdhci_readl(host, SDHCI_ARGUMENT),
		sdhci_readw(host, SDHCI_TRANSFER_MODE));
	printk(KERN_DEBUG DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
		sdhci_readl(host, SDHCI_PRESENT_STATE),
		sdhci_readb(host, SDHCI_HOST_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
		sdhci_readb(host, SDHCI_POWER_CONTROL),
		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		sdhci_readl(host, SDHCI_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		sdhci_readl(host, SDHCI_INT_ENABLE),
		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		sdhci_readw(host, SDHCI_ACMD12_ERR),
		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Max curr: 0x%08x\n",
		sdhci_readl(host, SDHCI_CAPABILITIES),
		sdhci_readl(host, SDHCI_MAX_CURRENT));

	/* The ADMA registers are only meaningful when ADMA is in use. */
	if (host->flags & SDHCI_USE_ADMA)
		printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
		       readl(host->ioaddr + SDHCI_ADMA_ERROR),
		       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));

	printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
}
90
91/*****************************************************************************\
92 *                                                                           *
93 * Low level functions                                                       *
94 *                                                                           *
95\*****************************************************************************/
96
97static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
98{
99	u32 ier;
100
101	ier = sdhci_readl(host, SDHCI_INT_ENABLE);
102	ier &= ~clear;
103	ier |= set;
104	sdhci_writel(host, ier, SDHCI_INT_ENABLE);
105	sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
106}
107
108static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
109{
110	sdhci_clear_set_irqs(host, 0, irqs);
111}
112
113static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
114{
115	sdhci_clear_set_irqs(host, irqs, 0);
116}
117
118static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
119{
120	u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
121
122	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
123		return;
124
125	if (enable)
126		sdhci_unmask_irqs(host, irqs);
127	else
128		sdhci_mask_irqs(host, irqs);
129}
130
131static void sdhci_enable_card_detection(struct sdhci_host *host)
132{
133	sdhci_set_card_detection(host, true);
134}
135
136static void sdhci_disable_card_detection(struct sdhci_host *host)
137{
138	sdhci_set_card_detection(host, false);
139}
140
/*
 * Issue a software reset of the engines selected by @mask and busy-wait
 * (up to 100 ms) for the controller to clear the reset bit(s) again.
 *
 * SDHCI_RESET_ALL also wipes the controller's clock setup, so the cached
 * host->clock is invalidated to force reprogramming on the next request.
 * Controllers with the RESTORE_IRQS_AFTER_RESET quirk lose their
 * interrupt enables across a reset; those are saved and replayed here.
 */
static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;
	u32 uninitialized_var(ier);

	/* Some controllers hang if reset while no card is present. */
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
			SDHCI_CARD_PRESENT))
			return;
	}

	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
		ier = sdhci_readl(host, SDHCI_INT_ENABLE);

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL)
		host->clock = 0;

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
		sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
}
178
179static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
180
/*
 * Bring the controller to a known state.  @soft selects a CMD+DATA
 * engine reset (preserving power and clock) instead of a full
 * RESET_ALL, then the standard interrupt set is programmed.  After a
 * soft reset the cached clock is cleared and the current ios replayed
 * so the clock gets reprogrammed.
 */
static void sdhci_init(struct sdhci_host *host, int soft)
{
	if (soft)
		sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
	else
		sdhci_reset(host, SDHCI_RESET_ALL);

	sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
		SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
		SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
		SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		sdhci_set_ios(host->mmc, &host->mmc->ios);
	}
}
200
/* Full (hard) re-initialisation, then re-arm card-detect interrupts. */
static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}
206
207static void sdhci_activate_led(struct sdhci_host *host)
208{
209	u8 ctrl;
210
211	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
212	ctrl |= SDHCI_CTRL_LED;
213	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
214}
215
216static void sdhci_deactivate_led(struct sdhci_host *host)
217{
218	u8 ctrl;
219
220	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
221	ctrl &= ~SDHCI_CTRL_LED;
222	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
223}
224
#ifdef SDHCI_USE_LEDS_CLASS
/*
 * LED-class brightness callback: map any non-zero brightness to "LED
 * on".  Takes host->lock since the LED bit shares the host control
 * register with other state.
 */
static void sdhci_led_control(struct led_classdev *led,
	enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (brightness == LED_OFF)
		sdhci_deactivate_led(host);
	else
		sdhci_activate_led(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
#endif
242
243/*****************************************************************************\
244 *                                                                           *
245 * Core functions                                                            *
246 *                                                                           *
247\*****************************************************************************/
248
/*
 * Read one block from the controller's PIO buffer into the request's
 * scatterlist.  The 32-bit buffer register is unpacked byte-by-byte
 * (little-endian: low byte first) so unaligned scatterlist segments
 * work.  Runs with local interrupts off for the duration of the block,
 * as required by the atomic sg_miter mapping.
 */
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;	/* bytes still valid in scratch */

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		/* Copy at most to the end of the current sg segment. */
		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			/* Refill scratch with the next 32-bit FIFO word. */
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
293
/*
 * Write one block from the request's scatterlist into the controller's
 * PIO buffer.  Bytes are packed little-endian into a 32-bit word which
 * is flushed when full, or when the block ends (so a block size that is
 * not a multiple of 4 still gets its tail written).  Runs with local
 * interrupts off, as required by the atomic sg_miter mapping.
 */
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;	/* bytes accumulated in scratch */
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		/* Copy at most to the end of the current sg segment. */
		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			/* Flush on a full word or at end of the block. */
			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
339
/*
 * Pump as many blocks as the controller currently has buffer space/data
 * for, in the direction of the current request.  Called from interrupt
 * context on DATA_AVAIL/SPACE_AVAIL; host->blocks tracks how many
 * blocks remain across calls.
 */
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
379
/*
 * Atomically map a scatterlist entry's page, returning a pointer at the
 * entry's offset.  Local interrupts are disabled (state saved into
 * *flags) until the matching sdhci_kunmap_atomic() call.
 */
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}
385
/* Undo sdhci_kmap_atomic(): drop the mapping and restore interrupts. */
static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}
391
392static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd)
393{
394	__le32 *dataddr = (__le32 __force *)(desc + 4);
395	__le16 *cmdlen = (__le16 __force *)desc;
396
397	/* SDHCI specification says ADMA descriptors should be 4 byte
398	 * aligned, so using 16 or 32bit operations should be safe. */
399
400	cmdlen[0] = cpu_to_le16(cmd);
401	cmdlen[1] = cpu_to_le16(len);
402
403	dataddr[0] = cpu_to_le32(addr);
404}
405
/*
 * Build and DMA-map the ADMA2 descriptor table for @data.
 *
 * Each scatterlist entry becomes a "tran" descriptor; entries whose DMA
 * address is not 32-bit aligned get their leading (up to 3) bytes routed
 * through a per-entry slot in host->align_buffer with an extra
 * descriptor, since ADMA requires aligned buffer addresses.  The table
 * is terminated either by flagging the last descriptor as "end" or by
 * appending a nop/end descriptor, depending on controller quirks.
 *
 * Returns 0 on success or -EINVAL if any DMA mapping fails; on failure
 * all mappings made so far are undone (goto-based unwind).
 */
static int sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	u8 *desc;
	u8 *align;
	dma_addr_t addr;
	dma_addr_t align_addr;
	int len, offset;

	struct scatterlist *sg;
	int i;
	char *buffer;
	unsigned long flags;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	/*
	 * The ADMA descriptor table is mapped further down as we
	 * need to fill it with data first.
	 */

	host->align_addr = dma_map_single(mmc_dev(host->mmc),
		host->align_buffer, 128 * 4, direction);
	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
		goto fail;
	BUG_ON(host->align_addr & 0x3);

	host->sg_count = dma_map_sg(mmc_dev(host->mmc),
		data->sg, data->sg_len, direction);
	if (host->sg_count == 0)
		goto unmap_align;

	desc = host->adma_desc;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA
		 * addresses must be 32-bit aligned. If they
		 * aren't, then we use a bounce buffer for
		 * the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (4 - (addr & 0x3)) & 0x3;
		if (offset) {
			/* For writes, pre-fill the bounce slot from the page. */
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_set_adma_desc(desc, align_addr, offset, 0x21);

			BUG_ON(offset > 65536);

			/* Each misaligned entry consumes one 4-byte slot. */
			align += 4;
			align_addr += 4;

			desc += 8;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		/* tran, valid */
		sdhci_set_adma_desc(desc, addr, len, 0x21);
		desc += 8;

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/*
		* Mark the last descriptor as the terminating descriptor
		*/
		if (desc != host->adma_desc) {
			desc -= 8;
			desc[0] |= 0x2; /* end */
		}
	} else {
		/*
		* Add a terminating entry.
		*/

		/* nop, end, valid */
		sdhci_set_adma_desc(desc, 0, 0, 0x3);
	}

	/*
	 * Resync align buffer as we might have changed it.
	 */
	if (data->flags & MMC_DATA_WRITE) {
		dma_sync_single_for_device(mmc_dev(host->mmc),
			host->align_addr, 128 * 4, direction);
	}

	host->adma_addr = dma_map_single(mmc_dev(host->mmc),
		host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
	if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
		goto unmap_entries;
	BUG_ON(host->adma_addr & 0x3);

	return 0;

unmap_entries:
	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
unmap_align:
	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		128 * 4, direction);
fail:
	return -EINVAL;
}
542
/*
 * Tear down the ADMA mappings created by sdhci_adma_table_pre().  For
 * reads, the bytes that were bounced through host->align_buffer (one
 * 4-byte slot per misaligned sg entry, in order) are copied back into
 * the scatterlist pages before the sg list itself is unmapped.
 */
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	struct scatterlist *sg;
	int i, size;
	u8 *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
		(128 * 2 + 1) * 4, DMA_TO_DEVICE);

	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		128 * 4, direction);

	if (data->flags & MMC_DATA_READ) {
		/* Make the DMA'd data visible to the CPU before copying. */
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);

		align = host->align_buffer;

		for_each_sg(data->sg, sg, host->sg_count, i) {
			if (sg_dma_address(sg) & 0x3) {
				size = 4 - (sg_dma_address(sg) & 0x3);

				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(buffer, align, size);
				sdhci_kunmap_atomic(buffer, &flags);

				align += 4;
			}
		}
	}

	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
}
588
/*
 * Compute the value for the data timeout control register.  The
 * hardware times out after 2^(13 + count) timeout-clock cycles, so we
 * find the smallest count whose timeout covers the request's
 * timeout_ns/timeout_clks budget.  Returns a count in [0, 0xE].
 *
 * NOTE(review): the divisions assume host->clock and host->timeout_clk
 * are non-zero when a data command is being prepared — confirm callers.
 */
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
{
	u8 count;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* timeout in us */
	target_timeout = data->timeout_ns / 1000 +
		data->timeout_clks / host->clock;

	/* Timeout clock runs off SDCLK on some controllers. */
	if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
		host->timeout_clk = host->clock / 1000;

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		printk(KERN_WARNING "%s: Too large timeout requested!\n",
			mmc_hostname(host->mmc));
		count = 0xE;
	}

	return count;
}
637
638static void sdhci_set_transfer_irqs(struct sdhci_host *host)
639{
640	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
641	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
642
643	if (host->flags & SDHCI_REQ_USE_DMA)
644		sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
645	else
646		sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
647}
648
/*
 * Program the controller for the data phase of a command: timeout,
 * transfer method (SDMA / ADMA / PIO), DMA mappings or sg_miter setup,
 * interrupt selection, and block size/count registers.
 *
 * DMA is attempted first; controllers with known size/alignment quirks
 * fall back to PIO when any sg entry violates the restriction.
 */
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
{
	u8 count;
	u8 ctrl;
	int ret;

	WARN_ON(host->data);

	/* Command without a data phase: nothing to prepare. */
	if (data == NULL)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;

	count = sdhci_calc_timeout(host, data);
	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
		host->flags |= SDHCI_REQ_USE_DMA;

	/* First pass: some controllers cannot DMA sg entries whose
	 * length is not a multiple of 4. */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & 0x3) {
					DBG("Reverting to PIO because of "
						"transfer size (%d)\n",
						sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	/* Second pass: some controllers also need 4-byte-aligned buffer
	 * start offsets. */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->offset & 0x3) {
					DBG("Reverting to PIO because of "
						"bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/* Map the buffers and load the DMA address register. */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA) {
			ret = sdhci_adma_table_pre(host, data);
			if (ret) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				sdhci_writel(host, host->adma_addr,
					SDHCI_ADMA_ADDRESS);
			}
		} else {
			int sg_cnt;

			sg_cnt = dma_map_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_READ) ?
						DMA_FROM_DEVICE :
						DMA_TO_DEVICE);
			if (sg_cnt == 0) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				/* SDMA can only handle a single segment. */
				WARN_ON(sg_cnt != 1);
				sdhci_writel(host, sg_dma_address(data->sg),
					SDHCI_DMA_ADDRESS);
			}
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA))
			ctrl |= SDHCI_CTRL_ADMA32;
		else
			ctrl |= SDHCI_CTRL_SDMA;
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	/* PIO path: set up the scatterlist iterator instead. */
	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* We do not handle DMA boundaries, so set it to max (512 KiB) */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
800
801static void sdhci_set_transfer_mode(struct sdhci_host *host,
802	struct mmc_data *data)
803{
804	u16 mode;
805
806	if (data == NULL)
807		return;
808
809	WARN_ON(!host->data);
810
811	mode = SDHCI_TRNS_BLK_CNT_EN;
812	if (data->blocks > 1) {
813		if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
814			mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_ACMD12;
815		else
816			mode |= SDHCI_TRNS_MULTI;
817	}
818	if (data->flags & MMC_DATA_READ)
819		mode |= SDHCI_TRNS_READ;
820	if (host->flags & SDHCI_REQ_USE_DMA)
821		mode |= SDHCI_TRNS_DMA;
822
823	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
824}
825
/*
 * Complete the data phase of the current request: undo DMA mappings,
 * record the number of bytes transferred, then either issue the stop
 * command (resetting the CMD/DATA engines first on error) or schedule
 * the finish tasklet.
 */
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA)
			sdhci_adma_table_post(host, data);
		else {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				data->sg_len, (data->flags & MMC_DATA_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}
	}

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	if (data->stop) {
		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}
871
/*
 * Issue @cmd on the controller.  Waits (up to 10 ms) for the CMD — and,
 * when the command carries data or busy signalling, DATA — inhibit bits
 * to clear, arms the 10-second software watchdog, programs the data
 * phase, argument and transfer mode, then writes the command register
 * (which starts execution).  On inhibit timeout or an unsupportable
 * response type, the request is failed via the finish tasklet.
 */
static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inihibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Controller never released "
				"inhibit bit(s).\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* Software watchdog in case the command never completes. */
	mod_timer(&host->timer, jiffies + 10 * HZ);

	host->cmd = cmd;

	sdhci_prepare_data(host, cmd->data);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd->data);

	/* The hardware has no response type for long response + busy. */
	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		printk(KERN_ERR "%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (cmd->data)
		flags |= SDHCI_CMD_DATA;

	/* Writing the command register kicks off execution. */
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
941
/*
 * Handle command completion: read back the response registers, then
 * either finish early-completed data, schedule the finish tasklet (for
 * commands without data), or leave the data phase to run.
 */
static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	host->cmd->error = 0;

	/* Data may have completed before the command response arrived. */
	if (host->data && host->data_early)
		sdhci_finish_data(host);

	if (!host->cmd->data)
		tasklet_schedule(&host->finish_tasklet);

	host->cmd = NULL;
}
974
/*
 * Set the SD bus clock to at most @clock Hz (0 turns the clock off).
 * Picks the largest power-of-two divider of max_clk that does not
 * exceed the request, waits (up to 20 ms) for the internal clock to
 * stabilise, then gates the clock onto the card.  Caches the requested
 * frequency in host->clock.
 */
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div;
	u16 clk;
	unsigned long timeout;

	if (clock == host->clock)
		return;

	/* Platform hook may handle the clock entirely (nonstandard quirk). */
	if (host->ops->set_clock) {
		host->ops->set_clock(host, clock);
		if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
			return;
	}

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		goto out;

	for (div = 1;div < 256;div *= 2) {
		if ((host->max_clk / div) <= clock)
			break;
	}
	/* Register encodes the divider as div/2 (spec divider field). */
	div >>= 1;

	clk = div << SDHCI_DIVIDER_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Internal clock never "
				"stabilised.\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

out:
	host->clock = clock;
}
1025
/*
 * Set the bus power.  @power is an MMC_VDD_* bit number ((unsigned
 * short)-1 means power off).  Translates the voltage to the SDHCI
 * power-control encoding, applies controller-specific write-ordering
 * quirks, and caches the result in host->pwr to skip redundant writes.
 */
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr;

	if (power == (unsigned short)-1)
		pwr = 0;
	else {
		switch (1 << power) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			BUG();
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		return;
	}

	/*
	 * Spec says that we should clear the power reg before setting
	 * a new value. Some controllers don't seem to like this though.
	 */
	if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

	/*
	 * At least the Marvell CaFe chip gets confused if we set the voltage
	 * and set turn on power at the same time, so set the voltage first.
	 */
	if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

	pwr |= SDHCI_POWER_ON;

	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

	/*
	 * Some controllers need an extra 10ms delay before they
	 * can apply clock after applying power
	 */
	if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
		mdelay(10);
}
1085
1086/*****************************************************************************\
1087 *                                                                           *
1088 * MMC callbacks                                                             *
1089 *                                                                           *
1090\*****************************************************************************/
1091
/*
 * mmc_host_ops.request callback: start processing @mrq.  Lights the
 * activity LED, strips the stop command for controllers that issue
 * auto-CMD12 themselves, and fails the request immediately with
 * -ENOMEDIUM if no card is present (or the device is dead); otherwise
 * the command is sent.  Completion is signalled asynchronously via the
 * finish tasklet.
 */
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	bool present;
	unsigned long flags;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_activate_led(host);
#endif
	/* The controller issues CMD12 itself; don't send it twice.
	 * NOTE(review): this clears the stop for every request with one,
	 * not only multi-block reads — confirm intent for this quirk. */
	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	host->mrq = mrq;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		present = true;
	else
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				SDHCI_CARD_PRESENT;

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else
		sdhci_send_command(host, mrq->cmd);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
1132
/*
 * mmc_host_ops.set_ios callback: apply the core's requested bus state —
 * clock frequency, power/voltage, bus width and high-speed timing —
 * to the controller registers, honouring reset-related quirks.
 */
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host;
	unsigned long flags;
	u8 ctrl;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		goto out;

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	sdhci_set_clock(host, ios->clock);

	if (ios->power_mode == MMC_POWER_OFF)
		sdhci_set_power(host, -1);
	else
		sdhci_set_power(host, ios->vdd);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (ios->bus_width == MMC_BUS_WIDTH_8)
		ctrl |= SDHCI_CTRL_8BITBUS;
	else
		ctrl &= ~SDHCI_CTRL_8BITBUS;

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		ctrl |= SDHCI_CTRL_4BITBUS;
	else
		ctrl &= ~SDHCI_CTRL_4BITBUS;

	if (ios->timing == MMC_TIMING_SD_HS &&
	    !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

out:
	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
1194
1195static int sdhci_get_ro(struct mmc_host *mmc)
1196{
1197	struct sdhci_host *host;
1198	unsigned long flags;
1199	int present;
1200
1201	host = mmc_priv(mmc);
1202
1203	spin_lock_irqsave(&host->lock, flags);
1204
1205	if (host->flags & SDHCI_DEVICE_DEAD)
1206		present = 0;
1207	else
1208		present = sdhci_readl(host, SDHCI_PRESENT_STATE);
1209
1210	spin_unlock_irqrestore(&host->lock, flags);
1211
1212	if (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT)
1213		return !!(present & SDHCI_WRITE_PROTECT);
1214	return !(present & SDHCI_WRITE_PROTECT);
1215}
1216
1217static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1218{
1219	struct sdhci_host *host;
1220	unsigned long flags;
1221
1222	host = mmc_priv(mmc);
1223
1224	spin_lock_irqsave(&host->lock, flags);
1225
1226	if (host->flags & SDHCI_DEVICE_DEAD)
1227		goto out;
1228
1229	if (enable)
1230		sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
1231	else
1232		sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
1233out:
1234	mmiowb();
1235
1236	spin_unlock_irqrestore(&host->lock, flags);
1237}
1238
/* MMC core callbacks implemented by this driver. */
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.set_ios	= sdhci_set_ios,
	.get_ro		= sdhci_get_ro,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
};
1245
1246/*****************************************************************************\
1247 *                                                                           *
1248 * Tasklets                                                                  *
1249 *                                                                           *
1250\*****************************************************************************/
1251
/*
 * Card insert/remove tasklet.  If the card disappeared while a
 * request was in flight, reset the controller, fail the request with
 * -ENOMEDIUM and hand completion to the finish tasklet; then ask the
 * MMC core to rescan (debounced by 200ms).
 */
static void sdhci_tasklet_card(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
		if (host->mrq) {
			printk(KERN_ERR "%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			printk(KERN_ERR "%s: Resetting controller.\n",
				mmc_hostname(host->mmc));

			/* Reset CMD and DATA lines separately, as elsewhere
			 * in this driver. */
			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	/* Called after dropping host->lock. */
	mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}
1280
1281static void sdhci_tasklet_finish(unsigned long param)
1282{
1283	struct sdhci_host *host;
1284	unsigned long flags;
1285	struct mmc_request *mrq;
1286
1287	host = (struct sdhci_host*)param;
1288
1289	spin_lock_irqsave(&host->lock, flags);
1290
1291	del_timer(&host->timer);
1292
1293	mrq = host->mrq;
1294
1295	/*
1296	 * The controller needs a reset of internal state machines
1297	 * upon error conditions.
1298	 */
1299	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
1300		(mrq->cmd->error ||
1301		 (mrq->data && (mrq->data->error ||
1302		  (mrq->data->stop && mrq->data->stop->error))) ||
1303		   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
1304
1305		/* Some controllers need this kick or reset won't work here */
1306		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
1307			unsigned int clock;
1308
1309			/* This is to force an update */
1310			clock = host->clock;
1311			host->clock = 0;
1312			sdhci_set_clock(host, clock);
1313		}
1314
1315		/* Spec says we should do both at the same time, but Ricoh
1316		   controllers do not like that. */
1317		sdhci_reset(host, SDHCI_RESET_CMD);
1318		sdhci_reset(host, SDHCI_RESET_DATA);
1319	}
1320
1321	host->mrq = NULL;
1322	host->cmd = NULL;
1323	host->data = NULL;
1324
1325#ifndef SDHCI_USE_LEDS_CLASS
1326	sdhci_deactivate_led(host);
1327#endif
1328
1329	mmiowb();
1330	spin_unlock_irqrestore(&host->lock, flags);
1331
1332	mmc_request_done(host->mmc, mrq);
1333}
1334
/*
 * Watchdog timer: fires when the hardware fails to raise a completion
 * interrupt for the active request.  Fails the in-flight data or
 * command with -ETIMEDOUT and schedules the finish tasklet.
 */
static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		printk(KERN_ERR "%s: Timeout waiting for hardware "
			"interrupt.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			/* Data phase timed out: finish_data handles cleanup
			 * and scheduling. */
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			/* No data phase: flag the command (or the request's
			 * command if none is active) and finish directly. */
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
1365
1366/*****************************************************************************\
1367 *                                                                           *
1368 * Interrupt handling                                                        *
1369 *                                                                           *
1370\*****************************************************************************/
1371
/*
 * Handle the command-related bits of the interrupt status register.
 * Called from sdhci_irq() with host->lock held; @intmask is the
 * already-acknowledged command portion of the status.
 */
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	/* Spurious interrupt: log and dump registers for diagnosis. */
	if (!host->cmd) {
		printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
			"though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & SDHCI_INT_TIMEOUT)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
			SDHCI_INT_INDEX))
		host->cmd->error = -EILSEQ;

	if (host->cmd->error) {
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * Unfortunately this is overloaded on the "data complete"
	 * interrupt, so we need to take some care when handling
	 * it.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (host->cmd->flags & MMC_RSP_BUSY) {
		if (host->cmd->data)
			DBG("Cannot wait for busy signal when also "
				"doing a data transfer");
		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
			return;

		/* The controller does not support the end-of-busy IRQ,
		 * fall through and take the SDHCI_INT_RESPONSE */
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}
1420
#ifdef DEBUG
/*
 * Dump the ADMA descriptor table for debugging after an ADMA error.
 * Walks 8-byte descriptors (attr byte, 16-bit length, 32-bit address)
 * until one with the END attribute (bit 1) is found.
 */
static void sdhci_show_adma_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	u8 *desc = host->adma_desc;
	__le32 *dma;
	__le16 *len;
	u8 attr;

	sdhci_dumpregs(host);

	while (true) {
		dma = (__le32 *)(desc + 4);
		len = (__le16 *)(desc + 2);
		attr = *desc;

		DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
		    name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);

		desc += 8;

		/* Bit 1 of the attribute byte marks the end descriptor. */
		if (attr & 2)
			break;
	}
}
#else
/* No-op when DEBUG is not enabled. */
static void sdhci_show_adma_error(struct sdhci_host *host) { }
#endif
1449
/*
 * Handle the data-related bits of the interrupt status register.
 * Called from sdhci_irq() with host->lock held; @intmask is the
 * already-acknowledged data portion of the status.
 */
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	if (!host->data) {
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_END) {
				sdhci_finish_command(host);
				return;
			}
		}

		printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
			"though no data operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_show_adma_error(host);
		host->data->error = -EIO;
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		/* PIO transfer: move data while space/data is available. */
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 */
		if (intmask & SDHCI_INT_DMA_END)
			sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS),
				SDHCI_DMA_ADDRESS);

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
1514
/*
 * Top-level interrupt handler (shared IRQ).  Reads the interrupt
 * status, acknowledges each group of bits by writing them back to
 * SDHCI_INT_STATUS, and dispatches to the card/command/data handlers.
 * SDIO card interrupts are signalled to the core only after the lock
 * is dropped.
 */
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result;
	struct sdhci_host* host = dev_id;
	u32 intmask;
	int cardint = 0;

	spin_lock(&host->lock);

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);

	/* 0 = not ours (shared IRQ); all-ones = controller gone. */
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	DBG("*** %s got interrupt: 0x%08x\n",
		mmc_hostname(host->mmc), intmask);

	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
			SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
		tasklet_schedule(&host->card_tasklet);
	}

	intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);

	if (intmask & SDHCI_INT_CMD_MASK) {
		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
			SDHCI_INT_STATUS);
		sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
	}

	if (intmask & SDHCI_INT_DATA_MASK) {
		sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
			SDHCI_INT_STATUS);
		sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
	}

	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);

	intmask &= ~SDHCI_INT_ERROR;

	if (intmask & SDHCI_INT_BUS_POWER) {
		printk(KERN_ERR "%s: Card is consuming too much power!\n",
			mmc_hostname(host->mmc));
		sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
	}

	intmask &= ~SDHCI_INT_BUS_POWER;

	if (intmask & SDHCI_INT_CARD_INT)
		cardint = 1;

	intmask &= ~SDHCI_INT_CARD_INT;

	/* Anything left over is unexpected: acknowledge it and complain. */
	if (intmask) {
		printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
			mmc_hostname(host->mmc), intmask);
		sdhci_dumpregs(host);

		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
	}

	result = IRQ_HANDLED;

	mmiowb();
out:
	spin_unlock(&host->lock);

	/*
	 * We have to delay this as it calls back into the driver.
	 */
	if (cardint)
		mmc_signal_sdio_irq(host->mmc);

	return result;
}
1593
1594/*****************************************************************************\
1595 *                                                                           *
1596 * Suspend/resume                                                            *
1597 *                                                                           *
1598\*****************************************************************************/
1599
1600#ifdef CONFIG_PM
1601
/*
 * Suspend the controller: stop card-detect handling, suspend the MMC
 * host, release the IRQ and disable the vmmc regulator (if present).
 * Returns 0 on success or a negative errno.  Note the final return
 * value is that of regulator_disable() when a regulator is in use.
 */
int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
{
	int ret;

	sdhci_disable_card_detection(host);

	ret = mmc_suspend_host(host->mmc);
	if (ret)
		return ret;

	free_irq(host->irq, host);

	if (host->vmmc)
		ret = regulator_disable(host->vmmc);

	return ret;
}
1619
1620EXPORT_SYMBOL_GPL(sdhci_suspend_host);
1621
1622int sdhci_resume_host(struct sdhci_host *host)
1623{
1624	int ret;
1625
1626	if (host->vmmc) {
1627		int ret = regulator_enable(host->vmmc);
1628		if (ret)
1629			return ret;
1630	}
1631
1632
1633	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1634		if (host->ops->enable_dma)
1635			host->ops->enable_dma(host);
1636	}
1637
1638	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
1639			  mmc_hostname(host->mmc), host);
1640	if (ret)
1641		return ret;
1642
1643	sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
1644	mmiowb();
1645
1646	ret = mmc_resume_host(host->mmc);
1647	sdhci_enable_card_detection(host);
1648
1649	return ret;
1650}
1651
1652EXPORT_SYMBOL_GPL(sdhci_resume_host);
1653
1654#endif /* CONFIG_PM */
1655
1656/*****************************************************************************\
1657 *                                                                           *
1658 * Device allocation/registration                                            *
1659 *                                                                           *
1660\*****************************************************************************/
1661
/*
 * Allocate an mmc_host with an sdhci_host (plus @priv_size bytes of
 * driver-private data) embedded as its private area.  Returns the
 * sdhci_host pointer or ERR_PTR(-ENOMEM) on allocation failure.
 */
struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;

	return host;
}
1679
1680EXPORT_SYMBOL_GPL(sdhci_alloc_host);
1681
/*
 * Probe and register an SDHCI controller with the MMC core.
 *
 * Reads the version and capabilities registers, selects SDMA/ADMA/PIO
 * transfer mode (honouring the broken-DMA quirks), derives clock and
 * transfer-size limits, sets up tasklets, timer, IRQ and the optional
 * vmmc regulator and LED, then calls mmc_add_host().
 *
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired up to that point are released via the goto cleanup labels.
 */
int sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	unsigned int caps;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	/* Module parameter overrides the platform-supplied quirks. */
	if (debug_quirks)
		host->quirks = debug_quirks;

	sdhci_reset(host, SDHCI_RESET_ALL);

	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (host->version & SDHCI_SPEC_VER_MASK)
				>> SDHCI_SPEC_VER_SHIFT;
	if (host->version > SDHCI_SPEC_200) {
		printk(KERN_ERR "%s: Unknown controller version (%d). "
			"You may experience problems.\n", mmc_hostname(mmc),
			host->version);
	}

	/* Some controllers have a broken caps register; use host->caps. */
	caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
		sdhci_readl(host, SDHCI_CAPABILITIES);

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(caps & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	/* ADMA2 requires at least a spec 2.00 controller. */
	if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma) {
			if (host->ops->enable_dma(host)) {
				printk(KERN_WARNING "%s: No suitable DMA "
					"available. Falling back to PIO.\n",
					mmc_hostname(mmc));
				host->flags &=
					~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
			}
		}
	}

	if (host->flags & SDHCI_USE_ADMA) {
		/*
		 * We need to allocate descriptors for all sg entries
		 * (128) and potentially one alignment transfer for
		 * each of those entries.
		 */
		host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
		host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
		if (!host->adma_desc || !host->align_buffer) {
			kfree(host->adma_desc);
			kfree(host->align_buffer);
			printk(KERN_WARNING "%s: Unable to allocate ADMA "
				"buffers. Falling back to standard DMA.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
	}

	/* Base clock from caps is in MHz; fall back to the driver hook. */
	host->max_clk =
		(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			printk(KERN_ERR
			       "%s: Hardware doesn't specify base clock "
			       "frequency.\n", mmc_hostname(mmc));
			return -ENODEV;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	host->timeout_clk =
		(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
	if (host->timeout_clk == 0) {
		if (host->ops->get_timeout_clock) {
			host->timeout_clk = host->ops->get_timeout_clock(host);
		} else if (!(host->quirks &
				SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
			printk(KERN_ERR
			       "%s: Hardware doesn't specify timeout clock "
			       "frequency.\n", mmc_hostname(mmc));
			return -ENODEV;
		}
	}
	if (caps & SDHCI_TIMEOUT_CLK_UNIT)
		host->timeout_clk *= 1000;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else
		mmc->f_min = host->max_clk / 256;
	mmc->f_max = host->max_clk;
	mmc->caps |= MMC_CAP_SDIO_IRQ;

	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	/* Build the supported-voltage mask from the capability bits. */
	mmc->ocr_avail = 0;
	if (caps & SDHCI_CAN_VDD_330)
		mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		mmc->ocr_avail |= MMC_VDD_165_195;

	if (mmc->ocr_avail == 0) {
		printk(KERN_ERR "%s: Hardware doesn't report any "
			"support voltages.\n", mmc_hostname(mmc));
		return -ENODEV;
	}

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_hw_segs = 128;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_hw_segs = 1;
	else /* PIO */
		mmc->max_hw_segs = 128;
	mmc->max_phys_segs = 128;

	/*
	 * Maximum number of sectors in one transfer. Limited by DMA boundary
	 * size (512KiB).
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_seg_size = 65536;
	else
		mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			printk(KERN_WARNING "%s: Invalid maximum block size, "
				"assuming 512 bytes\n", mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	/* Encoded as a shift count: 512 << n. */
	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->card_tasklet,
		sdhci_tasklet_card, (unsigned long)host);
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
		mmc_hostname(mmc), host);
	if (ret)
		goto untasklet;

	/* The vmmc regulator is optional; continue without it. */
	host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
	if (IS_ERR(host->vmmc)) {
		printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
		host->vmmc = NULL;
	} else {
		regulator_enable(host->vmmc);
	}

	sdhci_init(host, 0);

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

#ifdef SDHCI_USE_LEDS_CLASS
	snprintf(host->led_name, sizeof(host->led_name),
		"%s::", mmc_hostname(mmc));
	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	ret = led_classdev_register(mmc_dev(mmc), &host->led);
	if (ret)
		goto reset;
#endif

	mmiowb();

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ? "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

#ifdef SDHCI_USE_LEDS_CLASS
reset:
	sdhci_reset(host, SDHCI_RESET_ALL);
	free_irq(host->irq, host);
#endif
untasklet:
	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	return ret;
}
1956
1957EXPORT_SYMBOL_GPL(sdhci_add_host);
1958
/*
 * Unregister and tear down an SDHCI controller.  When @dead is
 * non-zero the hardware is assumed inaccessible: any in-flight request
 * is failed with -ENOMEDIUM and the final controller reset is skipped.
 */
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (host->mrq) {
			printk(KERN_ERR "%s: Controller removed during "
				" transfer!\n", mmc_hostname(host->mmc));

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(host->mmc);

#ifdef SDHCI_USE_LEDS_CLASS
	led_classdev_unregister(&host->led);
#endif

	/* Only touch the hardware if it is still reachable. */
	if (!dead)
		sdhci_reset(host, SDHCI_RESET_ALL);

	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	if (host->vmmc) {
		regulator_disable(host->vmmc);
		regulator_put(host->vmmc);
	}

	/* Free ADMA buffers and clear the pointers against reuse. */
	kfree(host->adma_desc);
	kfree(host->align_buffer);

	host->adma_desc = NULL;
	host->align_buffer = NULL;
}
2008
2009EXPORT_SYMBOL_GPL(sdhci_remove_host);
2010
/* Release the mmc_host (and the embedded sdhci_host) allocated by
 * sdhci_alloc_host(). */
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);
2017
2018/*****************************************************************************\
2019 *                                                                           *
2020 * Driver init/exit                                                          *
2021 *                                                                           *
2022\*****************************************************************************/
2023
/* Module init: only announces the driver; hosts register themselves
 * via sdhci_add_host(). */
static int __init sdhci_drv_init(void)
{
	printk(KERN_INFO DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}
2032
/* Module exit: nothing to do; hosts clean up via sdhci_remove_host(). */
static void __exit sdhci_drv_exit(void)
{
}
2036
module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

/* Read-only module parameter; overrides host quirks in sdhci_add_host(). */
module_param(debug_quirks, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
2047