/*
 * MTD device concatenation layer
 *
 * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
 *
 * This code is GPL
 *
 * $Id: mtdconcat.c,v 1.1.1.1 2008/10/15 03:26:35 james26_jang Exp $
 */
10
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/concat.h>
18
19/*
20 * Our storage structure:
21 * Subdev points to an array of pointers to struct mtd_info objects
22 * which is allocated along with this structure
23 *
24 */
struct mtd_concat {
	struct mtd_info mtd;		/* the "super" device presented to callers; MUST be first (see CONCAT()) */
	int             num_subdev;	/* number of entries in subdev[] */
	struct mtd_info **subdev;	/* pointer array, allocated in the same block right after this struct */
};
30
/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 * (struct itself + num_subdev pointers, allocated as one block)
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
37
38
/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 * This cast is valid only because the mtd member is the first
 * field of struct mtd_concat.
 */
#define CONCAT(x)  ((struct mtd_concat *)(x))
44
45
46/*
47 * MTD methods which look up the relevant subdevice, translate the
48 * effective address and pass through to the subdevice.
49 */
50
51static int concat_read (struct mtd_info *mtd, loff_t from, size_t len,
52			size_t *retlen, u_char *buf)
53{
54	struct mtd_concat *concat = CONCAT(mtd);
55	int err = -EINVAL;
56	int i;
57
58	*retlen = 0;
59
60	for(i = 0; i < concat->num_subdev; i++)
61	{
62		struct mtd_info *subdev = concat->subdev[i];
63		size_t size, retsize;
64
65		if (from >= subdev->size)
66		{
67			size  = 0;
68			from -= subdev->size;
69		}
70		else
71		{
72			if (from + len > subdev->size)
73				size = subdev->size - from;
74			else
75				size = len;
76
77			err = subdev->read(subdev, from, size, &retsize, buf);
78
79			if(err)
80				break;
81
82			*retlen += retsize;
83			len -= size;
84			if(len == 0)
85				break;
86
87			err = -EINVAL;
88			buf += size;
89			from = 0;
90		}
91	}
92	return err;
93}
94
95static int concat_write (struct mtd_info *mtd, loff_t to, size_t len,
96			size_t *retlen, const u_char *buf)
97{
98	struct mtd_concat *concat = CONCAT(mtd);
99	int err = -EINVAL;
100	int i;
101
102	if (!(mtd->flags & MTD_WRITEABLE))
103		return -EROFS;
104
105	*retlen = 0;
106
107	for(i = 0; i < concat->num_subdev; i++)
108	{
109		struct mtd_info *subdev = concat->subdev[i];
110		size_t size, retsize;
111
112		if (to >= subdev->size)
113		{
114			size  = 0;
115			to -= subdev->size;
116		}
117		else
118		{
119			if (to + len > subdev->size)
120				size = subdev->size - to;
121			else
122				size = len;
123
124			if (!(subdev->flags & MTD_WRITEABLE))
125				err = -EROFS;
126			else
127				err = subdev->write(subdev, to, size, &retsize, buf);
128
129			if(err)
130				break;
131
132			*retlen += retsize;
133			len -= size;
134			if(len == 0)
135				break;
136
137			err = -EINVAL;
138			buf += size;
139			to = 0;
140		}
141	}
142	return err;
143}
144
145static void concat_erase_callback (struct erase_info *instr)
146{
147	wake_up((wait_queue_head_t *)instr->priv);
148}
149
/*
 * Issue a single erase on one subdevice and wait (uninterruptibly) for
 * it to complete. 'erase' is filled in with this mtd, our completion
 * callback and the on-stack wait queue before being submitted.
 * Returns the submission error, -EIO if the erase ended in
 * MTD_ERASE_FAILED, or 0 on success.
 */
static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long)&waitq;

	err = mtd->erase(mtd, erase);
	if (!err)
	{
		/*
		 * Sleep until the callback fires. The state is re-checked
		 * after we are on the queue, so a completion that happened
		 * between erase() returning and add_wait_queue() is not lost.
		 */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}
179
180static int concat_erase (struct mtd_info *mtd, struct erase_info *instr)
181{
182	struct mtd_concat *concat = CONCAT(mtd);
183	struct mtd_info *subdev;
184	int i, err;
185	u_int32_t length;
186	struct erase_info *erase;
187
188	if (!(mtd->flags & MTD_WRITEABLE))
189		return -EROFS;
190
191	if(instr->addr > concat->mtd.size)
192		return -EINVAL;
193
194	if(instr->len + instr->addr > concat->mtd.size)
195		return -EINVAL;
196
197	/*
198	 * Check for proper erase block alignment of the to-be-erased area.
199	 * It is easier to do this based on the super device's erase
200	 * region info rather than looking at each particular sub-device
201	 * in turn.
202	 */
203	if (!concat->mtd.numeraseregions)
204	{	/* the easy case: device has uniform erase block size */
205		if(instr->addr & (concat->mtd.erasesize - 1))
206			return -EINVAL;
207		if(instr->len & (concat->mtd.erasesize - 1))
208			return -EINVAL;
209	}
210	else
211	{	/* device has variable erase size */
212		struct mtd_erase_region_info *erase_regions = concat->mtd.eraseregions;
213
214		/*
215		 * Find the erase region where the to-be-erased area begins:
216		 */
217		for(i = 0; i < concat->mtd.numeraseregions &&
218		           instr->addr >= erase_regions[i].offset; i++)
219			;
220		--i;
221
222		/*
223		 * Now erase_regions[i] is the region in which the
224		 * to-be-erased area begins. Verify that the starting
225		 * offset is aligned to this region's erase size:
226		 */
227		if (instr->addr & (erase_regions[i].erasesize-1))
228			return -EINVAL;
229
230		/*
231		 * now find the erase region where the to-be-erased area ends:
232		 */
233		for(; i < concat->mtd.numeraseregions &&
234		      (instr->addr + instr->len) >=  erase_regions[i].offset ; ++i)
235			;
236		--i;
237		/*
238		 * check if the ending offset is aligned to this region's erase size
239		 */
240		if ((instr->addr + instr->len) & (erase_regions[i].erasesize-1))
241			return -EINVAL;
242	}
243
244	/* make a local copy of instr to avoid modifying the caller's struct */
245	erase = kmalloc(sizeof(struct erase_info),GFP_KERNEL);
246
247	if (!erase)
248		return -ENOMEM;
249
250	*erase = *instr;
251	length = instr->len;
252
253	/*
254	 * find the subdevice where the to-be-erased area begins, adjust
255	 * starting offset to be relative to the subdevice start
256	 */
257	for(i = 0; i < concat->num_subdev; i++)
258	{
259		subdev = concat->subdev[i];
260		if(subdev->size <= erase->addr)
261			erase->addr -= subdev->size;
262		else
263			break;
264    }
265	if(i >= concat->num_subdev)	/* must never happen since size */
266		BUG();					/* limit has been verified above */
267
268	/* now do the erase: */
269	err = 0;
270	for(;length > 0; i++)	/* loop for all subevices affected by this request */
271	{
272		subdev = concat->subdev[i];		/* get current subdevice */
273
274		/* limit length to subdevice's size: */
275		if(erase->addr + length > subdev->size)
276			erase->len = subdev->size - erase->addr;
277		else
278			erase->len = length;
279
280		if (!(subdev->flags & MTD_WRITEABLE))
281		{
282			err = -EROFS;
283			break;
284		}
285		length -= erase->len;
286		if ((err = concat_dev_erase(subdev, erase)))
287		{
288			if(err == -EINVAL)	/* sanity check: must never happen since */
289				BUG();			/* block alignment has been checked above */
290			break;
291		}
292		/*
293		 * erase->addr specifies the offset of the area to be
294		 * erased *within the current subdevice*. It can be
295		 * non-zero only the first time through this loop, i.e.
296		 * for the first subdevice where blocks need to be erased.
297		 * All the following erases must begin at the start of the
298		 * current subdevice, i.e. at offset zero.
299		 */
300		erase->addr = 0;
301	}
302	instr->state = MTD_ERASE_DONE;
303	if (instr->callback)
304		instr->callback(instr);
305	kfree(erase);
306	return err;
307}
308
309static int concat_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
310{
311	struct mtd_concat *concat = CONCAT(mtd);
312	int i, err = -EINVAL;
313
314	if ((len + ofs) > mtd->size)
315		return -EINVAL;
316
317	for(i = 0; i < concat->num_subdev; i++)
318	{
319		struct mtd_info *subdev = concat->subdev[i];
320		size_t size;
321
322		if (ofs >= subdev->size)
323		{
324			size  = 0;
325			ofs -= subdev->size;
326		}
327		else
328		{
329			if (ofs + len > subdev->size)
330				size = subdev->size - ofs;
331			else
332				size = len;
333
334			err = subdev->lock(subdev, ofs, size);
335
336			if(err)
337				break;
338
339			len -= size;
340			if(len == 0)
341				break;
342
343			err = -EINVAL;
344			ofs = 0;
345		}
346	}
347	return err;
348}
349
350static int concat_unlock (struct mtd_info *mtd, loff_t ofs, size_t len)
351{
352	struct mtd_concat *concat = CONCAT(mtd);
353	int i, err = 0;
354
355	if ((len + ofs) > mtd->size)
356		return -EINVAL;
357
358	for(i = 0; i < concat->num_subdev; i++)
359	{
360		struct mtd_info *subdev = concat->subdev[i];
361		size_t size;
362
363		if (ofs >= subdev->size)
364		{
365			size  = 0;
366			ofs -= subdev->size;
367		}
368		else
369		{
370			if (ofs + len > subdev->size)
371				size = subdev->size - ofs;
372			else
373				size = len;
374
375			err = subdev->unlock(subdev, ofs, size);
376
377			if(err)
378				break;
379
380			len -= size;
381			if(len == 0)
382				break;
383
384			err = -EINVAL;
385			ofs = 0;
386		}
387	}
388	return err;
389}
390
391static void concat_sync(struct mtd_info *mtd)
392{
393	struct mtd_concat *concat = CONCAT(mtd);
394	int i;
395
396	for(i = 0; i < concat->num_subdev; i++)
397	{
398		struct mtd_info *subdev = concat->subdev[i];
399		subdev->sync(subdev);
400	}
401}
402
403static int concat_suspend(struct mtd_info *mtd)
404{
405	struct mtd_concat *concat = CONCAT(mtd);
406	int i, rc = 0;
407
408	for(i = 0; i < concat->num_subdev; i++)
409	{
410		struct mtd_info *subdev = concat->subdev[i];
411		if((rc = subdev->suspend(subdev)) < 0)
412			return rc;
413	}
414	return rc;
415}
416
417static void concat_resume(struct mtd_info *mtd)
418{
419	struct mtd_concat *concat = CONCAT(mtd);
420	int i;
421
422	for(i = 0; i < concat->num_subdev; i++)
423	{
424		struct mtd_info *subdev = concat->subdev[i];
425		subdev->resume(subdev);
426	}
427}
428
/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned on success, NULL on failure. This function does _not_
 * register any devices: this is the caller's responsibility.
 */
435struct mtd_info *mtd_concat_create(
436	struct mtd_info *subdev[],	/* subdevices to concatenate */
437	int num_devs,				/* number of subdevices      */
438	char *name)					/* name for the new device   */
439{
440	int i;
441	size_t size;
442	struct mtd_concat *concat;
443	u_int32_t max_erasesize, curr_erasesize;
444	int num_erase_region;
445
446	printk(KERN_NOTICE "Concatenating MTD devices:\n");
447	for(i = 0; i < num_devs; i++)
448		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
449	printk(KERN_NOTICE "into device \"%s\"\n", name);
450
451	/* allocate the device structure */
452	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
453	concat = kmalloc (size, GFP_KERNEL);
454	if(!concat)
455	{
456		printk ("memory allocation error while creating concatenated device \"%s\"\n",
457				name);
458			return NULL;
459	}
460	memset(concat, 0, size);
461	concat->subdev = (struct mtd_info **)(concat + 1);
462
463	/*
464	 * Set up the new "super" device's MTD object structure, check for
465	 * incompatibilites between the subdevices.
466	 */
467	concat->mtd.type      = subdev[0]->type;
468	concat->mtd.flags     = subdev[0]->flags;
469	concat->mtd.size      = subdev[0]->size;
470	concat->mtd.erasesize = subdev[0]->erasesize;
471	concat->mtd.oobblock  = subdev[0]->oobblock;
472	concat->mtd.oobsize   = subdev[0]->oobsize;
473	concat->mtd.ecctype   = subdev[0]->ecctype;
474	concat->mtd.eccsize   = subdev[0]->eccsize;
475
476	concat->subdev[0]   = subdev[0];
477
478	for(i = 1; i < num_devs; i++)
479	{
480		if(concat->mtd.type != subdev[i]->type)
481		{
482			kfree(concat);
483			printk ("Incompatible device type on \"%s\"\n", subdev[i]->name);
484			return NULL;
485		}
486		if(concat->mtd.flags != subdev[i]->flags)
487		{	/*
488			 * Expect all flags except MTD_WRITEABLE to be equal on
489			 * all subdevices.
490			 */
491			if((concat->mtd.flags ^ subdev[i]->flags) & ~MTD_WRITEABLE)
492			{
493				kfree(concat);
494				printk ("Incompatible device flags on \"%s\"\n", subdev[i]->name);
495				return NULL;
496			}
497			else	/* if writeable attribute differs, make super device writeable */
498				concat->mtd.flags |= subdev[i]->flags & MTD_WRITEABLE;
499		}
500		concat->mtd.size += subdev[i]->size;
501		if(concat->mtd.oobblock != subdev[i]->oobblock ||
502		   concat->mtd.oobsize  != subdev[i]->oobsize  ||
503		   concat->mtd.ecctype  != subdev[i]->ecctype  ||
504		   concat->mtd.eccsize  != subdev[i]->eccsize)
505		{
506			kfree(concat);
507			printk ("Incompatible OOB or ECC data on \"%s\"\n", subdev[i]->name);
508			return NULL;
509		}
510		concat->subdev[i] = subdev[i];
511
512	}
513
514	concat->num_subdev  = num_devs;
515	concat->mtd.name    = name;
516
517	/*
518	 * NOTE: for now, we do not provide any readv()/writev() methods
519	 *       because they are messy to implement and they are not
520	 *       used to a great extent anyway.
521	 */
522	concat->mtd.erase   = concat_erase;
523	concat->mtd.read    = concat_read;
524	concat->mtd.write   = concat_write;
525	concat->mtd.sync    = concat_sync;
526	concat->mtd.lock    = concat_lock;
527	concat->mtd.unlock  = concat_unlock;
528	concat->mtd.suspend = concat_suspend;
529	concat->mtd.resume  = concat_resume;
530
531
532	/*
533	 * Combine the erase block size info of the subdevices:
534	 *
535	 * first, walk the map of the new device and see how
536	 * many changes in erase size we have
537	 */
538	max_erasesize = curr_erasesize = subdev[0]->erasesize;
539	num_erase_region = 1;
540	for(i = 0; i < num_devs; i++)
541	{
542		if(subdev[i]->numeraseregions == 0)
543		{	/* current subdevice has uniform erase size */
544			if(subdev[i]->erasesize != curr_erasesize)
545			{	/* if it differs from the last subdevice's erase size, count it */
546				++num_erase_region;
547				curr_erasesize = subdev[i]->erasesize;
548				if(curr_erasesize > max_erasesize)
549					max_erasesize = curr_erasesize;
550			}
551		}
552		else
553		{	/* current subdevice has variable erase size */
554			int j;
555			for(j = 0; j < subdev[i]->numeraseregions; j++)
556			{	/* walk the list of erase regions, count any changes */
557				if(subdev[i]->eraseregions[j].erasesize != curr_erasesize)
558				{
559					++num_erase_region;
560					curr_erasesize = subdev[i]->eraseregions[j].erasesize;
561					if(curr_erasesize > max_erasesize)
562						max_erasesize = curr_erasesize;
563				}
564			}
565		}
566	}
567
568	if(num_erase_region == 1)
569	{	/*
570		 * All subdevices have the same uniform erase size.
571		 * This is easy:
572		 */
573		concat->mtd.erasesize = curr_erasesize;
574		concat->mtd.numeraseregions = 0;
575	}
576	else
577	{	/*
578		 * erase block size varies across the subdevices: allocate
579		 * space to store the data describing the variable erase regions
580		 */
581		struct mtd_erase_region_info *erase_region_p;
582		u_int32_t begin, position;
583
584		concat->mtd.erasesize = max_erasesize;
585		concat->mtd.numeraseregions = num_erase_region;
586		concat->mtd.eraseregions = erase_region_p = kmalloc (
587		     num_erase_region * sizeof(struct mtd_erase_region_info), GFP_KERNEL);
588		if(!erase_region_p)
589		{
590			kfree(concat);
591			printk ("memory allocation error while creating erase region list"
592			        " for device \"%s\"\n", name);
593			return NULL;
594		}
595
596		/*
597		 * walk the map of the new device once more and fill in
598		 * in erase region info:
599		 */
600		curr_erasesize = subdev[0]->erasesize;
601		begin = position = 0;
602		for(i = 0; i < num_devs; i++)
603		{
604			if(subdev[i]->numeraseregions == 0)
605			{	/* current subdevice has uniform erase size */
606				if(subdev[i]->erasesize != curr_erasesize)
607				{	/*
608					 *  fill in an mtd_erase_region_info structure for the area
609					 *  we have walked so far:
610					 */
611					erase_region_p->offset    = begin;
612					erase_region_p->erasesize = curr_erasesize;
613					erase_region_p->numblocks = (position - begin) / curr_erasesize;
614					begin = position;
615
616					curr_erasesize = subdev[i]->erasesize;
617					++erase_region_p;
618				}
619				position += subdev[i]->size;
620			}
621			else
622			{	/* current subdevice has variable erase size */
623				int j;
624				for(j = 0; j < subdev[i]->numeraseregions; j++)
625				{	/* walk the list of erase regions, count any changes */
626					if(subdev[i]->eraseregions[j].erasesize != curr_erasesize)
627					{
628						erase_region_p->offset    = begin;
629						erase_region_p->erasesize = curr_erasesize;
630						erase_region_p->numblocks = (position - begin) / curr_erasesize;
631						begin = position;
632
633						curr_erasesize = subdev[i]->eraseregions[j].erasesize;
634						++erase_region_p;
635					}
636					position += subdev[i]->eraseregions[j].numblocks * curr_erasesize;
637				}
638			}
639		}
640		/* Now write the final entry */
641		erase_region_p->offset    = begin;
642		erase_region_p->erasesize = curr_erasesize;
643		erase_region_p->numblocks = (position - begin) / curr_erasesize;
644	}
645
646	return &concat->mtd;
647}
648
649/*
650 * This function destroys an MTD object obtained from concat_mtd_devs()
651 */
652
653void mtd_concat_destroy(struct mtd_info *mtd)
654{
655	struct mtd_concat *concat = CONCAT(mtd);
656	if(concat->mtd.numeraseregions)
657		kfree(concat->mtd.eraseregions);
658	kfree(concat);
659}
660
661
/* Public entry points for users of the concatenation layer. */
EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating of MTD devices");
669