/*
 * Direct MTD block device access (read-only block device emulation)
 *
 * $Id: mtdblock_ro.c,v 1.1.1.1 2008/10/15 03:26:35 james26_jang Exp $
 *
 * 02-nov-2000	Nicolas Pitre		Added read-modify-write with cache
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#define MAJOR_NR MTD_BLOCK_MAJOR
#define DEVICE_NAME "mtdblock"
#define DEVICE_REQUEST mtdblock_request
#define DEVICE_NR(device) (device)
#define DEVICE_ON(device)
#define DEVICE_OFF(device)
#define DEVICE_NO_RANDOM
#include <linux/blk.h>
/* for old kernels... */
#ifndef QUEUE_EMPTY
#define QUEUE_EMPTY  (!CURRENT)
#endif
#if LINUX_VERSION_CODE < 0x20300
#define QUEUE_PLUGGED (blk_dev[MAJOR_NR].plug_tq.sync)
#else
#define QUEUE_PLUGGED (blk_dev[MAJOR_NR].request_queue.plugged)
#endif
#ifdef CONFIG_DEVFS_FS
#include <linux/devfs_fs_kernel.h>
static void mtd_notify_add(struct mtd_info* mtd);
static void mtd_notify_remove(struct mtd_info* mtd);
static struct mtd_notifier notifier = {
        mtd_notify_add,
        mtd_notify_remove,
        NULL
};
static devfs_handle_t devfs_dir_handle = NULL;
static devfs_handle_t devfs_ro_handle[MAX_MTD_DEVICES];
#endif
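/*
 * Per-minor device state: one mtdblk_dev is created on the first open of
 * a given minor and freed on its last release.  mtdblks_lock protects
 * the table and the use counts.
 */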
static struct mtdblk_dev {
	struct mtd_info *mtd; /* Locked */
	int count;
} *mtdblks[MAX_MTD_DEVICES];

static spinlock_t mtdblks_lock;

/* Device sizes in KiB, exported through blk_size[] */
static int mtd_sizes[MAX_MTD_DEVICES];
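/*
 * Open: look up the MTD device for this minor, then either reuse the
 * existing mtdblk_dev (bumping its use count) or allocate a fresh one.
 */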
static int mtdblock_open(struct inode *inode, struct file *file)
{
	struct mtdblk_dev *mtdblk;
	struct mtd_info *mtd;
	int dev;

	DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");

	if (!inode)
		return -EINVAL;

	dev = MINOR(inode->i_rdev);
	if (dev >= MAX_MTD_DEVICES)
		return -EINVAL;

	mtd = get_mtd_device(NULL, dev);
	if (!mtd)
		return -ENODEV;
	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		return -ENODEV;
	}

	spin_lock(&mtdblks_lock);

	/* If it's already open, no need to piss about. */
	if (mtdblks[dev]) {
		mtdblks[dev]->count++;
		spin_unlock(&mtdblks_lock);
		return 0;
	}

	/* OK, it's not open. Try to find it */
	/* First we have to drop the lock, because we have to
	   do things which might sleep.
	*/
	spin_unlock(&mtdblks_lock);

	mtdblk = kmalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
	if (!mtdblk) {
		put_mtd_device(mtd);
		return -ENOMEM;
	}
	memset(mtdblk, 0, sizeof(*mtdblk));
	mtdblk->count = 1;
	mtdblk->mtd = mtd;

	/* OK, we've created a new one. Add it to the list. */

	spin_lock(&mtdblks_lock);

	if (mtdblks[dev]) {
		/* Another CPU made one at the same time as us. */
		mtdblks[dev]->count++;
		spin_unlock(&mtdblks_lock);
		put_mtd_device(mtdblk->mtd);
		kfree(mtdblk);
		return 0;
	}

	mtdblks[dev] = mtdblk;
	mtd_sizes[dev] = mtdblk->mtd->size/1024;
	set_device_ro (inode->i_rdev, !(mtdblk->mtd->flags & MTD_WRITEABLE));

	spin_unlock(&mtdblks_lock);

	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");

	return 0;
}
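/*
 * Release: drop the use count; on the final close, sync the underlying
 * MTD device, release our reference to it and free the mtdblk_dev.
 */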
static release_t mtdblock_release(struct inode *inode, struct file *file)
{
	int dev;
	struct mtdblk_dev *mtdblk;

	DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");

	if (inode == NULL)
		release_return(-ENODEV);

	dev = MINOR(inode->i_rdev);
	mtdblk = mtdblks[dev];

	spin_lock(&mtdblks_lock);
	if (!--mtdblk->count) {
		/* It was the last usage. Free the device */
		mtdblks[dev] = NULL;
		spin_unlock(&mtdblks_lock);
		if (mtdblk->mtd->sync)
			mtdblk->mtd->sync(mtdblk->mtd);
		put_mtd_device(mtdblk->mtd);
		kfree(mtdblk);
	} else {
		spin_unlock(&mtdblks_lock);
	}

	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");

	release_return(0);
}
/*
 * This is a special request_fn because it is executed in a process context
 * to be able to sleep independently of the caller.  The io_request_lock
 * is held upon entry and exit.
 * The head of our request queue is considered active so there is no need
 * to dequeue requests before we are done.
 */
static void handle_mtdblock_request(void)
{
	struct request *req;
	struct mtdblk_dev *mtdblk;
	unsigned int res;

	for (;;) {
		INIT_REQUEST;
		req = CURRENT;
		spin_unlock_irq(&io_request_lock);

		/* Validate the minor before using it as an array index */
		if (MINOR(req->rq_dev) >= MAX_MTD_DEVICES)
			panic(__FUNCTION__": minor out of bounds");

		mtdblk = mtdblks[MINOR(req->rq_dev)];
		res = 0;

		if ((req->sector + req->current_nr_sectors) > (mtdblk->mtd->size >> 9))
			goto end_req;

		/* Handle the request: this driver is read-only, so anything
		   other than READ fails with res == 0 */
		switch (req->cmd)
		{
			int err;
			size_t retlen;

			case READ:
			err = MTD_READ (mtdblk->mtd, req->sector << 9,
					req->current_nr_sectors << 9,
					&retlen, req->buffer);
			if (!err)
				res = 1;
			break;
		}

end_req:
		spin_lock_irq(&io_request_lock);
		end_request(res);
	}
}
static volatile int leaving = 0;
static DECLARE_MUTEX_LOCKED(thread_sem);
static DECLARE_WAIT_QUEUE_HEAD(thr_wq);
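/*
 * Kernel thread that actually services the request queue.  It sleeps on
 * thr_wq until mtdblock_request() wakes it, then drains the queue via
 * handle_mtdblock_request() with io_request_lock held.
 */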
int mtdblock_thread(void *dummy)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	/* Detach from the context that spawned us and become a daemon */
	tsk->session = 1;
	tsk->pgrp = 1;
	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	tsk->flags |= PF_MEMALLOC;
	strcpy(tsk->comm, "mtdblockd");
	tsk->tty = NULL;
	spin_lock_irq(&tsk->sigmask_lock);
	sigfillset(&tsk->blocked);
	recalc_sigpending(tsk);
	spin_unlock_irq(&tsk->sigmask_lock);
	exit_mm(tsk);
	exit_files(tsk);
	exit_sighand(tsk);
	exit_fs(tsk);

	while (!leaving) {
		add_wait_queue(&thr_wq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irq(&io_request_lock);
		if (QUEUE_EMPTY || QUEUE_PLUGGED) {
			spin_unlock_irq(&io_request_lock);
			schedule();
			remove_wait_queue(&thr_wq, &wait);
		} else {
			remove_wait_queue(&thr_wq, &wait);
			set_current_state(TASK_RUNNING);
			handle_mtdblock_request();
			spin_unlock_irq(&io_request_lock);
		}
	}

	up(&thread_sem);
	return 0;
}
#if LINUX_VERSION_CODE < 0x20300
#define RQFUNC_ARG void
#else
#define RQFUNC_ARG request_queue_t *q
#endif
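/*
 * request_fn registered with the block layer: it only wakes mtdblockd,
 * which does the real work in process context.
 */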
static void mtdblock_request(RQFUNC_ARG)
{
	/* Don't do anything, except wake the thread if necessary */
	wake_up(&thr_wq);
}
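/*
 * ioctl: report the device size (BLKGETSIZE/BLKGETSIZE64) and flush
 * buffers (BLKFLSBUF); everything else is rejected.
 */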
static int mtdblock_ioctl(struct inode * inode, struct file * file,
		      unsigned int cmd, unsigned long arg)
{
	struct mtdblk_dev *mtdblk;

	mtdblk = mtdblks[MINOR(inode->i_rdev)];

#ifdef PARANOIA
	if (!mtdblk)
		BUG();
#endif

	switch (cmd) {
	case BLKGETSIZE:   /* Return device size in 512-byte sectors */
		return put_user((mtdblk->mtd->size >> 9), (unsigned long *) arg);
	case BLKGETSIZE64: /* Return device size in bytes */
		return put_user((u64)mtdblk->mtd->size, (u64 *)arg);

	case BLKFLSBUF:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
		if(!capable(CAP_SYS_ADMIN))
			return -EACCES;
#endif
		fsync_dev(inode->i_rdev);
		invalidate_buffers(inode->i_rdev);
		if (mtdblk->mtd->sync)
			mtdblk->mtd->sync(mtdblk->mtd);
		return 0;

	default:
		return -EINVAL;
	}
}
#if LINUX_VERSION_CODE < 0x20326
static struct file_operations mtd_fops =
{
	open: mtdblock_open,
	ioctl: mtdblock_ioctl,
	release: mtdblock_release,
	read: block_read,
	write: block_write
};
#else
static struct block_device_operations mtd_fops =
{
	owner: THIS_MODULE,
	open: mtdblock_open,
	release: mtdblock_release,
	ioctl: mtdblock_ioctl
};
#endif
#ifdef CONFIG_DEVFS_FS
/* Notification that a new device has been added. Create the devfs entry for
 * it. */

static void mtd_notify_add(struct mtd_info* mtd)
{
        char name[8];

        if (!mtd || mtd->type == MTD_ABSENT)
                return;

        sprintf(name, "%d", mtd->index);
        devfs_ro_handle[mtd->index] = devfs_register(devfs_dir_handle, name,
                        DEVFS_FL_DEFAULT, MTD_BLOCK_MAJOR, mtd->index,
                        S_IFBLK | S_IRUGO | S_IWUGO,
                        &mtd_fops, NULL);
}
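/* Notification that a device has been removed. Remove its devfs entry. */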
static void mtd_notify_remove(struct mtd_info* mtd)
{
        if (!mtd || mtd->type == MTD_ABSENT)
                return;

        devfs_unregister(devfs_ro_handle[mtd->index]);
}
#endif
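/*
 * Module init: register the block device (and devfs entries where
 * configured), publish device sizes via blk_size[], set up the request
 * queue and start the mtdblockd service thread.
 */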
int __init init_mtdblock(void)
{
	int i;

	spin_lock_init(&mtdblks_lock);
#ifdef CONFIG_DEVFS_FS
	if (devfs_register_blkdev(MTD_BLOCK_MAJOR, DEVICE_NAME, &mtd_fops))
	{
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
			MTD_BLOCK_MAJOR);
		return -EAGAIN;
	}

	devfs_dir_handle = devfs_mk_dir(NULL, DEVICE_NAME, NULL);
	register_mtd_user(&notifier);
#else
	if (register_blkdev(MAJOR_NR,DEVICE_NAME,&mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_BLOCK_MAJOR);
		return -EAGAIN;
	}
#endif

	/* We fill it in at open() time. */
	for (i=0; i< MAX_MTD_DEVICES; i++) {
		mtd_sizes[i] = 0;
	}
	init_waitqueue_head(&thr_wq);
	/* Allow the block size to default to BLOCK_SIZE. */
	blksize_size[MAJOR_NR] = NULL;
	blk_size[MAJOR_NR] = mtd_sizes;

	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request);
	kernel_thread (mtdblock_thread, NULL, CLONE_FS|CLONE_FILES|CLONE_SIGHAND);
	return 0;
}
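/*
 * Module exit: stop mtdblockd (waiting on thread_sem for it to finish),
 * then unregister the device and tear down the request queue.
 */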
static void __exit cleanup_mtdblock(void)
{
	leaving = 1;
	wake_up(&thr_wq);
	down(&thread_sem);
#ifdef CONFIG_DEVFS_FS
	unregister_mtd_user(&notifier);
	devfs_unregister(devfs_dir_handle);
	devfs_unregister_blkdev(MTD_BLOCK_MAJOR, DEVICE_NAME);
#else
	unregister_blkdev(MAJOR_NR,DEVICE_NAME);
#endif
	blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
	blksize_size[MAJOR_NR] = NULL;
	blk_size[MAJOR_NR] = NULL;
}

module_init(init_mtdblock);
module_exit(cleanup_mtdblock);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@cam.org> et al.");
MODULE_DESCRIPTION("Read-only block device emulation access to MTD devices");