/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

static struct workqueue_struct *workqueue;

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
int use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume.  Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
int mmc_assume_removable;
#else
int mmc_assume_removable = 1;
#endif
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}

/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
87 *
88 *	MMC drivers should call this function when they have completed
89 *	their processing of a request.
90 */
91void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
92{
93	struct mmc_command *cmd = mrq->cmd;
94	int err = cmd->error;
95
96	if (err && cmd->retries && mmc_host_is_spi(host)) {
97		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
98			cmd->retries = 0;
99	}
100
101	if (err && cmd->retries) {
102		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
103			mmc_hostname(host), cmd->opcode, err);
104
105		cmd->retries--;
106		cmd->error = 0;
107		host->ops->request(host, mrq);
108	} else {
109		led_trigger_event(host->led, LED_OFF);
110
111		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
112			mmc_hostname(host), cmd->opcode, err,
113			cmd->resp[0], cmd->resp[1],
114			cmd->resp[2], cmd->resp[3]);
115
116		if (mrq->data) {
117			pr_debug("%s:     %d bytes transferred: %d\n",
118				mmc_hostname(host),
119				mrq->data->bytes_xfered, mrq->data->error);
120		}
121
122		if (mrq->stop) {
123			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
124				mmc_hostname(host), mrq->stop->opcode,
125				mrq->stop->error,
126				mrq->stop->resp[0], mrq->stop->resp[1],
127				mrq->stop->resp[2], mrq->stop->resp[3]);
128		}
129
130		if (mrq->done)
131			mrq->done(mrq);
132	}
133}
134
135EXPORT_SYMBOL(mmc_request_done);
136
137static void
138mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
139{
140#ifdef CONFIG_MMC_DEBUG
141	unsigned int i, sz;
142	struct scatterlist *sg;
143#endif
144
145	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
146		 mmc_hostname(host), mrq->cmd->opcode,
147		 mrq->cmd->arg, mrq->cmd->flags);
148
149	if (mrq->data) {
150		pr_debug("%s:     blksz %d blocks %d flags %08x "
151			"tsac %d ms nsac %d\n",
152			mmc_hostname(host), mrq->data->blksz,
153			mrq->data->blocks, mrq->data->flags,
154			mrq->data->timeout_ns / 1000000,
155			mrq->data->timeout_clks);
156	}
157
158	if (mrq->stop) {
159		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
160			 mmc_hostname(host), mrq->stop->opcode,
161			 mrq->stop->arg, mrq->stop->flags);
162	}
163
164	WARN_ON(!host->claimed);
165
166	led_trigger_event(host->led, LED_FULL);
167
168	mrq->cmd->error = 0;
169	mrq->cmd->mrq = mrq;
170	if (mrq->data) {
171		BUG_ON(mrq->data->blksz > host->max_blk_size);
172		BUG_ON(mrq->data->blocks > host->max_blk_count);
173		BUG_ON(mrq->data->blocks * mrq->data->blksz >
174			host->max_req_size);
175
176#ifdef CONFIG_MMC_DEBUG
177		sz = 0;
178		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
179			sz += sg->length;
180		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
181#endif
182
183		mrq->cmd->data = mrq->data;
184		mrq->data->error = 0;
185		mrq->data->mrq = mrq;
186		if (mrq->stop) {
187			mrq->data->stop = mrq->stop;
188			mrq->stop->error = 0;
189			mrq->stop->mrq = mrq;
190		}
191	}
192	host->ops->request(host, mrq);
193}
194
195static void mmc_wait_done(struct mmc_request *mrq)
196{
197	complete(mrq->done_data);
198}
199
200/**
201 *	mmc_wait_for_req - start a request and wait for completion
202 *	@host: MMC host to start command
203 *	@mrq: MMC request to start
204 *
205 *	Start a new MMC custom command request for a host, and wait
206 *	for the command to complete. Does not attempt to parse the
207 *	response.
208 */
209void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
210{
211	DECLARE_COMPLETION_ONSTACK(complete);
212
213	mrq->done_data = &complete;
214	mrq->done = mmc_wait_done;
215
216	mmc_start_request(host, mrq);
217
218	wait_for_completion(&complete);
219}
220
221EXPORT_SYMBOL(mmc_wait_for_req);
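
/*
 * Example (editor's illustration, not part of the original source):
 * a minimal synchronous request, assuming the host has already been
 * claimed.  The opcode, argument and flags shown are the same ones
 * this file uses when polling card status in mmc_do_erase().
 *
 *	struct mmc_request mrq;
 *	struct mmc_command cmd;
 *
 *	memset(&mrq, 0, sizeof(mrq));
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	mrq.cmd = &cmd;
 *	mmc_wait_for_req(card->host, &mrq);
 *	// cmd.error and cmd.resp[] now hold the outcome
 */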

/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete.  Return any error that occurred while the command
 *	was executing.  Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq;

	WARN_ON(!host->claimed);

	memset(&mrq, 0, sizeof(struct mmc_request));

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
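
/*
 * Example (editor's illustration, not part of the original source):
 * the same status query via this convenience wrapper, which builds
 * the request, clears the response and handles retries itself.
 *
 *	struct mmc_command cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);	// up to 3 retries
 */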

/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		timeout_us += data->timeout_clks * 1000 /
			(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The limit is really 250 ms, but that is
			 * insufficient for some crappy cards.
			 */
			limit_us = 300000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}
	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns =  100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
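
/*
 * Example (editor's illustration, not part of the original source):
 * a typical caller fills in the data phase and then lets the core
 * compute the timeout fields before starting the request.
 *
 *	data.blksz = 512;
 *	data.blocks = nr_blocks;		// hypothetical count
 *	data.flags = MMC_DATA_READ;
 *	data.sg = sg;
 *	data.sg_len = sg_len;
 *	mmc_set_data_timeout(&data, card);	// sets timeout_ns/timeout_clks
 */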

/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 *
 *	Note that this function is only relevant when issuing a
 *	single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
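
/*
 * Worked example (editor's note): with the current implementation a
 * 13-byte transfer is rounded up to the next multiple of 4:
 *
 *	sz = mmc_align_data_size(card, 13);	// ((13 + 3) / 4) * 4 == 16
 */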

/**
 *	mmc_host_enable - enable a host.
 *	@host: mmc host to enable
 *
 *	Hosts that support power saving can use the 'enable' and 'disable'
 *	methods to exit and enter power saving states. For more information
 *	see comments for struct mmc_host_ops.
 */
int mmc_host_enable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (host->nesting_cnt++)
		return 0;

	cancel_delayed_work_sync(&host->disable);

	if (host->enabled)
		return 0;

	if (host->ops->enable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->enable(host);
		host->en_dis_recurs = 0;

		if (err) {
			pr_debug("%s: enable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
	}
	host->enabled = 1;
	return 0;
}
EXPORT_SYMBOL(mmc_host_enable);

static int mmc_host_do_disable(struct mmc_host *host, int lazy)
{
	if (host->ops->disable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->disable(host, lazy);
		host->en_dis_recurs = 0;

		if (err < 0) {
			pr_debug("%s: disable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
		if (err > 0) {
			unsigned long delay = msecs_to_jiffies(err);

			mmc_schedule_delayed_work(&host->disable, delay);
		}
	}
	host->enabled = 0;
	return 0;
}

/**
 *	mmc_host_disable - disable a host.
 *	@host: mmc host to disable
 *
 *	Hosts that support power saving can use the 'enable' and 'disable'
 *	methods to exit and enter power saving states. For more information
 *	see comments for struct mmc_host_ops.
 */
int mmc_host_disable(struct mmc_host *host)
{
	int err;

	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (--host->nesting_cnt)
		return 0;

	if (!host->enabled)
		return 0;

	err = mmc_host_do_disable(host, 0);
	return err;
}
EXPORT_SYMBOL(mmc_host_disable);

/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non-null and
 *	dereferences to a non-zero value, this will return prematurely with
 *	that non-zero value without acquiring the lock.  Otherwise it
 *	returns zero with the lock held.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (!stop)
		mmc_host_enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);
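
/*
 * Example (editor's illustration, not part of the original source):
 * the usual claim/release bracket around a command sequence, using
 * the mmc_claim_host() wrapper that passes a NULL @abort:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *	mmc_release_host(card->host);
 */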

/**
 *	mmc_try_claim_host - try exclusively to claim a host
 *	@host: mmc host to claim
 *
 *	Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);

static void mmc_do_release_host(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}

void mmc_host_deeper_disable(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, disable.work);

	/* If the host is claimed then we do not want to disable it anymore */
	if (!mmc_try_claim_host(host))
		return;
	mmc_host_do_disable(host, 1);
	mmc_do_release_host(host);
}

/**
 *	mmc_host_lazy_disable - lazily disable a host.
 *	@host: mmc host to disable
 *
 *	Hosts that support power saving can use the 'enable' and 'disable'
 *	methods to exit and enter power saving states. For more information
 *	see comments for struct mmc_host_ops.
 */
int mmc_host_lazy_disable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (--host->nesting_cnt)
		return 0;

	if (!host->enabled)
		return 0;

	if (host->disable_delay) {
		mmc_schedule_delayed_work(&host->disable,
				msecs_to_jiffies(host->disable_delay));
		return 0;
	} else
		return mmc_host_do_disable(host, 1);
}
EXPORT_SYMBOL(mmc_host_lazy_disable);

/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	WARN_ON(!host->claimed);

	mmc_host_lazy_disable(host);

	mmc_do_release_host(host);
}

EXPORT_SYMBOL(mmc_release_host);

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit numbering starts at 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}
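
/*
 * Worked example (editor's note): for vdd = 3300 and low_bits = false,
 * bit = (3300 - 2000) / 100 + 8 = 21, i.e. ilog2(MMC_VDD_33_34).  With
 * low_bits = true, vdd is first decremented to 3299, giving
 * bit = (3299 - 2000) / 100 + 8 = 20, i.e. ilog2(MMC_VDD_32_33).
 */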

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
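
/*
 * Example (editor's illustration, not part of the original source):
 * the boundary case from the comment above, as a board file might
 * use it when declaring its supply range:
 *
 *	host->ocr_avail = mmc_vddrange_to_ocrmask(3300, 3400);
 *	// == MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35
 */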

#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int			result = 0;
	int			count;
	int			i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int		vdd_uV;
		int		vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 * @supply: regulator to use
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit)
{
	int			result = 0;
	int			min_uV, max_uV;
	int			enabled;

	enabled = regulator_is_enabled(supply);
	if (enabled < 0)
		return enabled;

	if (vdd_bit) {
		int		tmp;
		int		voltage;

		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/* avoid needless changes to this voltage; the regulator
		 * might not allow this operation
		 */
		voltage = regulator_get_voltage(supply);
		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !enabled)
			result = regulator_enable(supply);
	} else if (enabled) {
		result = regulator_disable(supply);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);
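
/*
 * Example (editor's illustration, not part of the original source):
 * a host driver's set_ios() handler driving its supply from the
 * requested power state.  The 'vmmc' regulator field is hypothetical.
 *
 *	static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		struct foo_host *host = mmc_priv(mmc);
 *
 *		if (ios->power_mode == MMC_POWER_OFF)
 *			mmc_regulator_set_ocr(host->vmmc, 0);
 *		else
 *			mmc_regulator_set_ocr(host->vmmc, ios->vdd);
 *	}
 */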

#endif

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		host->ios.vdd = bit;
		mmc_set_ios(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}

/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	if (host->f_min > 400000) {
		pr_warning("%s: Minimum clock frequency too high for "
				"identification mode\n", mmc_hostname(host));
		host->ios.clock = host->f_min;
	} else
		host->ios.clock = 400000;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);
}

static void mmc_power_off(struct mmc_host *host)
{
	host->ios.clock = 0;
	host->ios.vdd = 0;
	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host. Assumes that there are
 * no interesting cards left, so the bus is powered down.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_power_off(host);

	mmc_bus_put(host);
}

/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
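
/*
 * Example (editor's illustration, not part of the original source):
 * a card-detect interrupt handler usually passes a small delay to
 * debounce the slot; the 200 ms value here is only an assumption.
 *
 *	static irqreturn_t foo_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */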

void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly, and hugely, over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.  For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value.  For modern cards it
	 * will end up being 4MiB.  Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}

static void mmc_set_mmc_erase_timeout(struct mmc_card *card,
				      struct mmc_command *cmd,
				      unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (card->host->ios.clock / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	cmd->erase_timeout = erase_timeout;
}

static void mmc_set_sd_erase_timeout(struct mmc_card *card,
				     struct mmc_command *cmd, unsigned int arg,
				     unsigned int qty)
{
	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		cmd->erase_timeout = card->ssr.erase_timeout * qty +
				     card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		cmd->erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (cmd->erase_timeout < 1000)
		cmd->erase_timeout = 1000;
}

static void mmc_set_erase_timeout(struct mmc_card *card,
				  struct mmc_command *cmd, unsigned int arg,
				  unsigned int qty)
{
	if (mmc_card_sd(card))
		mmc_set_sd_erase_timeout(card, cmd, arg, qty);
	else
		mmc_set_mmc_erase_timeout(card, cmd, arg, qty);
}

static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd;
	unsigned int qty = 0;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2.  For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
	 * division in that case.  SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EINVAL;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EINVAL;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	mmc_set_erase_timeout(card, &cmd, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			printk(KERN_ERR "error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == 7);	/* 7: programming state */
out:
	return err;
}

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
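
/*
 * Example (editor's illustration, not part of the original source):
 * discarding a sector range with the host claimed, preferring TRIM
 * when the card advertises it (see mmc_can_trim() below):
 *
 *	mmc_claim_host(card->host);
 *	if (mmc_can_trim(card))
 *		err = mmc_erase(card, from, nr, MMC_TRIM_ARG);
 *	else if (mmc_can_erase(card))
 *		err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
 *	mmc_release_host(card->host);
 */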

int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->rescan_disable) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
		host->bus_ops->detect(host);

	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr)) {
			mmc_claim_host(host);
			/* try SDMEM (but not MMC) even if SDIO is broken */
			if (mmc_send_app_op_cond(host, 0, &ocr))
				goto out_fail;

			if (mmc_attach_sd(host, ocr))
				mmc_power_off(host);
		}
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		goto out;
	}

out_fail:
	mmc_release_host(host);
	mmc_power_off(host);

out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
	mmc_detect_change(host, 0);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}

void mmc_power_save_host(struct mmc_host *host)
{
	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return;
	}

	if (host->bus_ops->power_save)
		host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);
}
EXPORT_SYMBOL(mmc_power_save_host);

void mmc_power_restore_host(struct mmc_host *host)
{
	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return;
	}

	mmc_power_up(host);
	host->bus_ops->power_restore(host);

	mmc_bus_put(host);
}
EXPORT_SYMBOL(mmc_power_restore_host);

int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);

int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);

#ifdef CONFIG_PM

/**
 *	mmc_suspend_host - suspend a host
 *	@host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->suspend)
			err = host->bus_ops->suspend(host);
		if (err == -ENOSYS || !host->bus_ops->resume) {
			/*
			 * We simply "remove" the card in this case.
			 * It will be redetected on resume.
			 */
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);
			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_release_host(host);
			host->pm_flags = 0;
			err = 0;
		}
	}
	mmc_bus_put(host);

	if (!err && !(host->pm_flags & MMC_PM_KEEP_POWER))
		mmc_power_off(host);

	return err;
}

EXPORT_SYMBOL(mmc_suspend_host);

/**
 *	mmc_resume_host - resume a previously suspended host
 *	@host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			printk(KERN_WARNING "%s: error %d during resume "
					    "(card was removed?)\n",
					    mmc_hostname(host), err);
			err = 0;
		}
	}
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);
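
/*
 * Example (editor's illustration, not part of the original source):
 * a platform host driver forwarding its system PM callbacks to the
 * core; the foo_* names and drvdata wiring are assumptions.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return mmc_suspend_host(dev_get_drvdata(dev));
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return mmc_resume_host(dev_get_drvdata(dev));
 *	}
 */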

/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do it in the PM notifier while userspace isn't yet frozen, so we
 * will be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
					unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		mmc_claim_host(host);

		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_detach_bus(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);

	}

	return 0;
}
#endif

static int __init mmc_init(void)
{
	int ret;

	workqueue = create_singlethread_workqueue("kmmcd");
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");