/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/smp_lock.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"

MODULE_ALIAS("mmc:block");

/*
 * max 8 partitions per card
 */
#define MMC_SHIFT	3
#define MMC_NUM_MINORS	(256 >> MMC_SHIFT)
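/* With MMC_SHIFT == 3, this is 256 >> 3 == 32: one dev_use bit per card. */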

static DECLARE_BITMAP(dev_use, MMC_NUM_MINORS);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;

	unsigned int	usage;
	unsigned int	read_only;
};

static DEFINE_MUTEX(open_lock);

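/*
 * Take a reference on the mmc_blk_data attached to a gendisk.
 * Returns NULL if the last reference is already gone (usage == 0),
 * so a racing open cannot resurrect a dying device.
 */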
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devmaj = MAJOR(disk_devt(md->disk));
		int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;

		if (!devmaj)
			devidx = md->disk->first_minor >> MMC_SHIFT;

		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	lock_kernel();
	if (md) {
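		/*
		 * usage == 2 means this is the first open: the driver
		 * holds one reference from mmc_blk_alloc() and
		 * mmc_blk_get() above just took the second.
		 */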
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	unlock_kernel();

	return ret;
}

static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	lock_kernel();
	mmc_blk_put(md);
	unlock_kernel();
	return 0;
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
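	/*
	 * Flash cards have no real CHS geometry; fake a 4-head,
	 * 16-sector layout for tools that still expect one.
	 */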
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
};

struct mmc_blk_request {
	struct mmc_request	mrq;
	struct mmc_command	cmd;
	struct mmc_command	stop;
	struct mmc_data		data;
};

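/*
 * Issue ACMD22 (SEND_NUM_WR_BLKS) to ask an SD card how many blocks
 * of the preceding write were actually committed to the medium.
 * Returns (u32)-1 on any failure.
 */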
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;
	unsigned int timeout_us;

	struct scatterlist sg;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	memset(&data, 0, sizeof(struct mmc_data));

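	/*
	 * Allow 100 times the nominal CSD access time, but cap the
	 * total timeout at 100ms.
	 */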
	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

	timeout_us = data.timeout_ns / 1000;
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	memset(&mrq, 0, sizeof(struct mmc_request));

	mrq.cmd = &cmd;
	mrq.data = &data;

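	/*
	 * The card returns the count as one big-endian 32-bit word.
	 * Use a kmalloc'd buffer rather than the stack so the host
	 * controller can safely DMA into it via the scatterlist.
	 */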
	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}

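/* Issue CMD13 (SEND_STATUS) and return the card's R1 status word. */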
static u32 get_card_status(struct mmc_card *card, struct request *req)
{
	struct mmc_command cmd;
	int err;

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		printk(KERN_ERR "%s: error %d sending status command\n",
		       req->rq_disk->disk_name, err);
	return cmd.resp[0];
}

static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0;

	mmc_claim_host(card->host);

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

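	/*
	 * Prefer TRIM, which works on arbitrary sectors, when the
	 * card supports it; otherwise fall back to erasing whole
	 * erase groups.
	 */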
	if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;

	err = mmc_erase(card, from, nr, arg);
out:
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	mmc_release_host(card->host);

	return err ? 0 : 1;
}

static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0;

	mmc_claim_host(card->host);

	if (!mmc_can_secure_erase_trim(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

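	/*
	 * Secure erase only works on whole erase groups, so use the
	 * two-step secure trim (mark, then execute) when the range is
	 * not erase-group aligned and the card can trim.
	 */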
	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

	err = mmc_erase(card, from, nr, arg);
	if (!err && arg == MMC_SECURE_TRIM1_ARG)
		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
out:
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	mmc_release_host(card->host);

	return err ? 0 : 1;
}

static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1, disable_multi = 0;

	mmc_claim_host(card->host);

	do {
		struct mmc_command cmd;
		u32 readcmd, writecmd, status = 0;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		brq.cmd.arg = blk_rq_pos(req);
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 512;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		brq.data.blocks = blk_rq_sectors(req);

		/*
		 * The block layer doesn't support all sector count
		 * restrictions, so we need to be prepared for too big
		 * requests.
		 */
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;

		/*
		 * After a read error, we redo the request one sector at a time
		 * in order to accurately determine which sectors can be read
		 * successfully.
		 */
		if (disable_multi && brq.data.blocks > 1)
			brq.data.blocks = 1;

		if (brq.data.blocks > 1) {
			/* SPI multiblock writes terminate using a special
			 * token, not a STOP_TRANSMISSION request.
			 */
			if (!mmc_host_is_spi(card->host)
					|| rq_data_dir(req) == READ)
				brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}

		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}

		mmc_set_data_timeout(&brq.data, card);

		brq.data.sg = mq->sg;
		brq.data.sg_len = mmc_queue_map_sg(mq);

		/*
		 * Adjust the sg list so it is the same size as the
		 * request: blocks may have been clamped above, so walk
		 * the list and truncate it after data_size bytes.
		 */
		if (brq.data.blocks != blk_rq_sectors(req)) {
			int i, data_size = brq.data.blocks << 9;
			struct scatterlist *sg;

			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
				data_size -= sg->length;
				if (data_size <= 0) {
					sg->length += data_size;
					i++;
					break;
				}
			}
			brq.data.sg_len = i;
		}

		mmc_queue_bounce_pre(mq);

		mmc_wait_for_req(card->host, &brq.mrq);

		mmc_queue_bounce_post(mq);

		/*
		 * Check for errors here, but don't jump to cmd_err
		 * until later as we need to wait for the card to leave
		 * programming mode even when things go wrong.
		 */
		if (brq.cmd.error || brq.data.error || brq.stop.error) {
			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
				/* Redo read one sector at a time */
				printk(KERN_WARNING "%s: retrying using single "
				       "block read\n", req->rq_disk->disk_name);
				disable_multi = 1;
				continue;
			}
			status = get_card_status(card, req);
		}

		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write "
			       "command, response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.cmd.error,
			       brq.cmd.resp[0], status);
		}

		if (brq.data.error) {
			if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
				/* 'Stop' response contains card status */
				status = brq.mrq.stop->resp[0];
			printk(KERN_ERR "%s: error %d transferring data,"
			       " sector %u, nr %u, card status %#x\n",
			       req->rq_disk->disk_name, brq.data.error,
			       (unsigned)blk_rq_pos(req),
			       (unsigned)blk_rq_sectors(req), status);
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command, "
			       "response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.stop.error,
			       brq.stop.resp[0], status);
		}

		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
				/*
				 * Some cards mishandle the status bits,
				 * so make sure to check both the busy
				 * indication and the card state.
				 */
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
				(R1_CURRENT_STATE(cmd.resp[0]) == 7)); /* state 7: programming */

		}

		if (brq.cmd.error || brq.stop.error || brq.data.error) {
			if (rq_data_dir(req) == READ) {
				/*
				 * After an error, we redo I/O one sector at a
				 * time, so we only reach here after trying to
				 * read a single sector.
				 */
				spin_lock_irq(&md->lock);
				ret = __blk_end_request(req, -EIO, brq.data.blksz);
				spin_unlock_irq(&md->lock);
				continue;
			}
			goto cmd_err;
		}

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_release_host(card->host);

	return 1;

 cmd_err:
	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still use the number of
	 * written sectors as reported by the controller (which might
	 * be less than the real number of written sectors, but never
	 * more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, 0, blocks << 9);
			spin_unlock_irq(&md->lock);
		}
	} else {
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}

	mmc_release_host(card->host);

	spin_lock_irq(&md->lock);
	while (ret)
		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
	spin_unlock_irq(&md->lock);

	return 0;
}

static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	if (req->cmd_flags & REQ_DISCARD) {
		if (req->cmd_flags & REQ_SECURE)
			return mmc_blk_issue_secdiscard_rq(mq, req);
		else
			return mmc_blk_issue_discard_rq(mq, req);
	} else {
		return mmc_blk_issue_rw_rq(mq, req);
	}
}

static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int devidx, ret;

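	/*
	 * Claim the first free slot in the minor-number bitmap; each
	 * slot covers 1 << MMC_SHIFT minors for the card's partitions.
	 */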
	devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
	if (devidx >= MMC_NUM_MINORS)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(1 << MMC_SHIFT);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock);
	if (ret)
		goto err_putdisk;

	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx << MMC_SHIFT;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = &card->dev;

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	sprintf(md->disk->disk_name, "mmcblk%d", devidx);

	blk_queue_logical_block_size(md->queue.queue, 512);

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in units of 512-byte
		 * sectors.
		 */
		set_capacity(md->disk, card->ext_csd.sectors);
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		set_capacity(md->disk,
			card->csd.capacity << (card->csd.read_blkbits - 9));
	}
	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	return ERR_PTR(ret);
}

static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
	struct mmc_command cmd;
	int err;

	/* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
	if (mmc_card_blockaddr(card))
		return 0;

	mmc_claim_host(card->host);
	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = 512;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 5);
	mmc_release_host(card->host);

	if (err) {
		printk(KERN_ERR "%s: unable to set block size to %d: %d\n",
			md->disk->disk_name, cmd.arg, err);
		return -EINVAL;
	}

	return 0;
}

static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int err;

	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	err = mmc_blk_set_blksize(md, card);
	if (err)
		goto out;

	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	printk(KERN_INFO "%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	mmc_set_drvdata(card, md);
	add_disk(md->disk);
	return 0;

 out:
	mmc_cleanup_queue(&md->queue);
	mmc_blk_put(md);

	return err;
}

static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		/* Stop new requests from getting into the queue */
		del_gendisk(md->disk);

		/* Then flush out any already in there */
		mmc_cleanup_queue(&md->queue);

		mmc_blk_put(md);
	}
	mmc_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_queue_suspend(&md->queue);
	}
	return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
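		/*
		 * The card may have been power cycled while suspended,
		 * so restore the 512-byte block length before letting
		 * requests through again.
		 */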
		mmc_blk_set_blksize(md, card);
		mmc_queue_resume(&md->queue);
	}
	return 0;
}
#else
#define	mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL
#endif

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};

static int __init mmc_blk_init(void)
{
	int res;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}

static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");