1/*
2 * Block driver for media (i.e., flash cards)
3 *
4 * Copyright 2002 Hewlett-Packard Company
5 * Copyright 2005-2007 Pierre Ossman
6 *
7 * Use consistent with the GNU GPL is permitted,
8 * provided that this copyright notice is
9 * preserved in its entirety in all copies and derived works.
10 *
11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13 * FITNESS FOR ANY PARTICULAR PURPOSE.
14 *
15 * Many thanks to Alessandro Rubini and Jonathan Corbet!
16 *
17 * Author:  Andrew Christian
18 *          28 May 2002
19 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"
42
43/*
44 * max 8 partitions per card
45 */
46#define MMC_SHIFT	3
47
48/*
49 * There is one mmc_blk_data per slot.
50 */
struct mmc_blk_data {
	spinlock_t	lock;		/* protects request completion on queue */
	struct gendisk	*disk;		/* block-layer disk; released on last put */
	struct mmc_queue queue;		/* per-card request queue (see queue.h) */

	unsigned int	usage;		/* refcount, guarded by open_lock */
	unsigned int	block_bits;	/* log2 of block size used on the card */
	unsigned int	read_only;	/* non-zero: refuse writable opens */
};

/* Serializes mmc_blk_get()/mmc_blk_put(), and hence md->usage. */
static DEFINE_MUTEX(open_lock);
62
63static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
64{
65	struct mmc_blk_data *md;
66
67	mutex_lock(&open_lock);
68	md = disk->private_data;
69	if (md && md->usage == 0)
70		md = NULL;
71	if (md)
72		md->usage++;
73	mutex_unlock(&open_lock);
74
75	return md;
76}
77
78static void mmc_blk_put(struct mmc_blk_data *md)
79{
80	mutex_lock(&open_lock);
81	md->usage--;
82	if (md->usage == 0) {
83		put_disk(md->disk);
84		kfree(md);
85	}
86	mutex_unlock(&open_lock);
87}
88
89static int mmc_blk_open(struct inode *inode, struct file *filp)
90{
91	struct mmc_blk_data *md;
92	int ret = -ENXIO;
93
94	md = mmc_blk_get(inode->i_bdev->bd_disk);
95	if (md) {
96		if (md->usage == 2)
97			check_disk_change(inode->i_bdev);
98		ret = 0;
99
100		if ((filp->f_mode & FMODE_WRITE) && md->read_only)
101			ret = -EROFS;
102	}
103
104	return ret;
105}
106
107static int mmc_blk_release(struct inode *inode, struct file *filp)
108{
109	struct mmc_blk_data *md = inode->i_bdev->bd_disk->private_data;
110
111	mmc_blk_put(md);
112	return 0;
113}
114
115static int
116mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
117{
118	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
119	geo->heads = 4;
120	geo->sectors = 16;
121	return 0;
122}
123
/* Block device operations backing the mmcblk%d device nodes. */
static struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
};
130
/*
 * Everything needed for one transfer in a single stack allocation;
 * mrq is wired up to the embedded cmd/stop/data by mmc_blk_issue_rq().
 */
struct mmc_blk_request {
	struct mmc_request	mrq;	/* request handed to the MMC core */
	struct mmc_command	cmd;	/* the read/write command */
	struct mmc_command	stop;	/* CMD12 for multi-block transfers */
	struct mmc_data		data;	/* data-phase descriptor */
};
137
/*
 * Ask an SD card how many blocks of the preceding write completed
 * successfully (ACMD22 / SD_APP_SEND_NUM_WR_BLKS), so a failed write
 * request can still be completed up to the known-good point.
 *
 * Returns the written-block count, or (u32)-1 if either the APP_CMD
 * prefix or the data transfer failed.
 */
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 blocks;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;
	unsigned int timeout_us;

	struct scatterlist sg;

	memset(&cmd, 0, sizeof(struct mmc_command));

	/* CMD55: announce that the next command is application-specific. */
	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	memset(&data, 0, sizeof(struct mmc_data));

	/* Allow the card 100x its nominal access time (CSD TAAC/NSAC)... */
	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

	timeout_us = data.timeout_ns / 1000;
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	/* ...but cap the total data timeout at 100 ms. */
	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

	/* The reply payload is a single 32-bit word. */
	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	memset(&mrq, 0, sizeof(struct mmc_request));

	mrq.cmd = &cmd;
	mrq.data = &data;

	sg_init_one(&sg, &blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE)
		return (u32)-1;

	/* The count arrives big-endian; convert to host byte order. */
	blocks = ntohl(blocks);

	return blocks;
}
202
/*
 * Service one block-layer request: issue (possibly multi-block) read
 * or write transfers to the card until the whole request is completed
 * or an unrecoverable error ends it.
 *
 * Called from the mmc_queue thread (issue_fn).  Returns 1 when the
 * request completed successfully, 0 when it was failed.
 */
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1, sg_pos, data_size;

	mmc_claim_host(card->host);

	do {
		struct mmc_command cmd;
		u32 readcmd, writecmd;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		/*
		 * Byte-addressed cards take the argument in bytes;
		 * block-addressed (high capacity) cards take sectors.
		 */
		brq.cmd.arg = req->sector;
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
		brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 1 << md->block_bits;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
		/* Clamp the transfer to what the host controller accepts. */
		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;

		mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);

		/*
		 * If the host doesn't support multiple block writes, force
		 * block writes to single block. SD cards are excepted from
		 * this rule as they support querying the number of
		 * successfully written sectors.
		 */
		if (rq_data_dir(req) != READ &&
		    !(card->host->caps & MMC_CAP_MULTIWRITE) &&
		    !mmc_card_sd(card))
			brq.data.blocks = 1;

		if (brq.data.blocks > 1) {
			/* Open-ended multi-block transfer: needs CMD12. */
			brq.data.flags |= MMC_DATA_MULTI;
			brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}

		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}

		brq.data.sg = mq->sg;
		brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);

		/*
		 * The scatterlist maps the whole remaining request; if the
		 * block count was clamped above, trim the list (and the
		 * length of its last used entry) to the clamped size.
		 */
		if (brq.data.blocks !=
		    (req->nr_sectors >> (md->block_bits - 9))) {
			data_size = brq.data.blocks * brq.data.blksz;
			for (sg_pos = 0; sg_pos < brq.data.sg_len; sg_pos++) {
				data_size -= mq->sg[sg_pos].length;
				if (data_size <= 0) {
					/* data_size is now the (negative)
					 * overshoot: shrink the final entry
					 * by that amount. */
					mq->sg[sg_pos].length += data_size;
					sg_pos++;
					break;
				}
			}
			brq.data.sg_len = sg_pos;
		}

		mmc_wait_for_req(card->host, &brq.mrq);
		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write command\n",
			       req->rq_disk->disk_name, brq.cmd.error);
			goto cmd_err;
		}

		if (brq.data.error) {
			printk(KERN_ERR "%s: error %d transferring data\n",
			       req->rq_disk->disk_name, brq.data.error);
			goto cmd_err;
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, brq.stop.error);
			goto cmd_err;
		}

		if (rq_data_dir(req) != READ) {
			/*
			 * After a write, poll CMD13 until the card reports
			 * READY_FOR_DATA (programming finished).
			 * NOTE(review): cmd is not zeroed here; this relies
			 * on mmc_wait_for_cmd() initializing the remaining
			 * fields each iteration — confirm.
			 */
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA));

		}

		/*
		 * A block was successfully transferred.  Complete that
		 * chunk; end_that_request_chunk() returns non-zero while
		 * sectors remain, keeping the outer loop going.
		 */
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		if (!ret) {
			/*
			 * The whole request completed successfully.
			 */
			add_disk_randomness(req->rq_disk);
			blkdev_dequeue_request(req);
			end_that_request_last(req, 1);
		}
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_release_host(card->host);

	return 1;

 cmd_err:
 	/*
 	 * If this is an SD card and we're writing, we can first
 	 * mark the known good sectors as ok.
 	 *
	 * If the card is not SD, we can still ok written sectors
	 * if the controller can do proper error reporting.
	 *
	 * For reads we just fail the entire chunk as that should
	 * be safe in all cases.
	 */
 	if (rq_data_dir(req) != READ && mmc_card_sd(card)) {
		u32 blocks;
		unsigned int bytes;

		/* ACMD22: how many blocks actually made it to the card? */
		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			if (card->csd.write_partial)
				bytes = blocks << md->block_bits;
			else
				bytes = blocks << 9;
			spin_lock_irq(&md->lock);
			ret = end_that_request_chunk(req, 1, bytes);
			spin_unlock_irq(&md->lock);
		}
	} else if (rq_data_dir(req) != READ &&
		   (card->host->caps & MMC_CAP_MULTIWRITE)) {
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}

	mmc_release_host(card->host);

	/*
	 * Fail whatever is left of the request, chunk by chunk, then
	 * retire it.
	 */
	spin_lock_irq(&md->lock);
	while (ret) {
		ret = end_that_request_chunk(req, 0,
				req->current_nr_sectors << 9);
	}

	add_disk_randomness(req->rq_disk);
	blkdev_dequeue_request(req);
	end_that_request_last(req, 0);
	spin_unlock_irq(&md->lock);

	return 0;
}
384
#define MMC_NUM_MINORS	(256 >> MMC_SHIFT)

/* Bitmap of claimed device indices; one bit per minor range (card). */
static unsigned long dev_use[MMC_NUM_MINORS/(8*sizeof(unsigned long))];
388
389static inline int mmc_blk_readonly(struct mmc_card *card)
390{
391	return mmc_card_readonly(card) ||
392	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
393}
394
395static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
396{
397	struct mmc_blk_data *md;
398	int devidx, ret;
399
400	devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
401	if (devidx >= MMC_NUM_MINORS)
402		return ERR_PTR(-ENOSPC);
403	__set_bit(devidx, dev_use);
404
405	md = kmalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
406	if (!md) {
407		ret = -ENOMEM;
408		goto out;
409	}
410
411	memset(md, 0, sizeof(struct mmc_blk_data));
412
413	/*
414	 * Set the read-only status based on the supported commands
415	 * and the write protect switch.
416	 */
417	md->read_only = mmc_blk_readonly(card);
418
419	/*
420	 * Both SD and MMC specifications state (although a bit
421	 * unclearly in the MMC case) that a block size of 512
422	 * bytes must always be supported by the card.
423	 */
424	md->block_bits = 9;
425
426	md->disk = alloc_disk(1 << MMC_SHIFT);
427	if (md->disk == NULL) {
428		ret = -ENOMEM;
429		goto err_kfree;
430	}
431
432	spin_lock_init(&md->lock);
433	md->usage = 1;
434
435	ret = mmc_init_queue(&md->queue, card, &md->lock);
436	if (ret)
437		goto err_putdisk;
438
439	md->queue.issue_fn = mmc_blk_issue_rq;
440	md->queue.data = md;
441
442	md->disk->major	= MMC_BLOCK_MAJOR;
443	md->disk->first_minor = devidx << MMC_SHIFT;
444	md->disk->fops = &mmc_bdops;
445	md->disk->private_data = md;
446	md->disk->queue = md->queue.queue;
447	md->disk->driverfs_dev = &card->dev;
448
449	/*
450	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
451	 *
452	 * - be set for removable media with permanent block devices
453	 * - be unset for removable block devices with permanent media
454	 *
455	 * Since MMC block devices clearly fall under the second
456	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
457	 * should use the block device creation/destruction hotplug
458	 * messages to tell when the card is present.
459	 */
460
461	sprintf(md->disk->disk_name, "mmcblk%d", devidx);
462
463	blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits);
464
465	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
466		/*
467		 * The EXT_CSD sector count is in number or 512 byte
468		 * sectors.
469		 */
470		set_capacity(md->disk, card->ext_csd.sectors);
471	} else {
472		/*
473		 * The CSD capacity field is in units of read_blkbits.
474		 * set_capacity takes units of 512 bytes.
475		 */
476		set_capacity(md->disk,
477			card->csd.capacity << (card->csd.read_blkbits - 9));
478	}
479	return md;
480
481 err_putdisk:
482	put_disk(md->disk);
483 err_kfree:
484	kfree(md);
485 out:
486	return ERR_PTR(ret);
487}
488
489static int
490mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
491{
492	struct mmc_command cmd;
493	int err;
494
495	/* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
496	if (mmc_card_blockaddr(card))
497		return 0;
498
499	mmc_claim_host(card->host);
500	cmd.opcode = MMC_SET_BLOCKLEN;
501	cmd.arg = 1 << md->block_bits;
502	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
503	err = mmc_wait_for_cmd(card->host, &cmd, 5);
504	mmc_release_host(card->host);
505
506	if (err) {
507		printk(KERN_ERR "%s: unable to set block size to %d: %d\n",
508			md->disk->disk_name, cmd.arg, err);
509		return -EINVAL;
510	}
511
512	return 0;
513}
514
515static int mmc_blk_probe(struct mmc_card *card)
516{
517	struct mmc_blk_data *md;
518	int err;
519
520	/*
521	 * Check that the card supports the command class(es) we need.
522	 */
523	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
524		return -ENODEV;
525
526	md = mmc_blk_alloc(card);
527	if (IS_ERR(md))
528		return PTR_ERR(md);
529
530	err = mmc_blk_set_blksize(md, card);
531	if (err)
532		goto out;
533
534	printk(KERN_INFO "%s: %s %s %lluKiB %s\n",
535		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
536		(unsigned long long)(get_capacity(md->disk) >> 1),
537		md->read_only ? "(ro)" : "");
538
539	mmc_set_drvdata(card, md);
540	add_disk(md->disk);
541	return 0;
542
543 out:
544	mmc_blk_put(md);
545
546	return err;
547}
548
549static void mmc_blk_remove(struct mmc_card *card)
550{
551	struct mmc_blk_data *md = mmc_get_drvdata(card);
552
553	if (md) {
554		int devidx;
555
556		/* Stop new requests from getting into the queue */
557		del_gendisk(md->disk);
558
559		/* Then flush out any already in there */
560		mmc_cleanup_queue(&md->queue);
561
562		devidx = md->disk->first_minor >> MMC_SHIFT;
563		__clear_bit(devidx, dev_use);
564
565		mmc_blk_put(md);
566	}
567	mmc_set_drvdata(card, NULL);
568}
569
570#ifdef CONFIG_PM
571static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
572{
573	struct mmc_blk_data *md = mmc_get_drvdata(card);
574
575	if (md) {
576		mmc_queue_suspend(&md->queue);
577	}
578	return 0;
579}
580
581static int mmc_blk_resume(struct mmc_card *card)
582{
583	struct mmc_blk_data *md = mmc_get_drvdata(card);
584
585	if (md) {
586		mmc_blk_set_blksize(md, card);
587		mmc_queue_resume(&md->queue);
588	}
589	return 0;
590}
591#else
592#define	mmc_blk_suspend	NULL
593#define mmc_blk_resume	NULL
594#endif
595
/* MMC bus binding; probe/remove are called once per inserted card. */
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};
605
606static int __init mmc_blk_init(void)
607{
608	int res = -ENOMEM;
609
610	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
611	if (res)
612		goto out;
613
614	return mmc_register_driver(&mmc_driver);
615
616 out:
617	return res;
618}
619
620static void __exit mmc_blk_exit(void)
621{
622	mmc_unregister_driver(&mmc_driver);
623	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
624}
625
626module_init(mmc_blk_init);
627module_exit(mmc_blk_exit);
628
629MODULE_LICENSE("GPL");
630MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
631