/*
 * MTD device concatenation layer
 *
 * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * This code is GPL
 *
 * $Id: mtdconcat.c,v 1.1.1.1 2007/08/03 18:52:43 Exp $
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>

/*
 * Our storage structure:
 * subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * How to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
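
/*
 * Layout note: a single allocation holds the mtd_concat structure
 * followed directly by the subdev pointer array, e.g. for two
 * subdevices:
 *
 *   +--------------------+------------------------+
 *   | struct mtd_concat  | subdev[0] | subdev[1]  |
 *   +--------------------+------------------------+
 *   ^concat               ^concat->subdev == (struct mtd_info **)(concat + 1)
 */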

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x)  ((struct mtd_concat *)(x))

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */
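
/*
 * Example: with two 1 MiB subdevices, a read at offset 0x180000 of the
 * concatenated device skips subdev[0] (from -= 0x100000) and is issued
 * to subdev[1] at offset 0x80000. A request crossing the boundary is
 * split into one chunk per affected subdevice.
 */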

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = subdev->read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (err == -EBADMSG) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (err == -EUCLEAN) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}
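
/*
 * Note on the return value: mirroring a single device's read(), the
 * function above returns -EBADMSG if any chunk had an uncorrectable
 * ECC error and -EUCLEAN if bitflips were corrected but nothing
 * failed, while still transferring as much data as possible and
 * accumulating *retlen across subdevices.
 */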

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t * retlen, const u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->write(subdev, to, size, &retsize, buf);

		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t * retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Do not allow write past end of device */
	if ((to + total_len) > mtd->size)
		return -EINVAL;

	/* Check alignment */
	if (mtd->writesize > 1) {
		loff_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}
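
	/*
	 * In the alignment check above, do_div() divides __to by
	 * writesize in place and returns the remainder, so a non-zero
	 * result means the target offset is not page-aligned. E.g.
	 * with writesize 2048, to == 4096 passes while to == 4100 is
	 * rejected.
	 */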

	/* make a copy of vecs */
	vecs_copy = kmalloc(sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;
	memcpy(vecs_copy, vecs, sizeof(struct kvec) * count);

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min(total_len, (size_t)(subdev->size - to));
		wsize = size; /* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->writev(subdev, &vecs_copy[entry_low],
				entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}
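
/*
 * Splitting example for the loop above: suppose two vectors of 3 KiB
 * each are written starting 1 KiB below a subdevice boundary. The
 * first subdevice receives vecs_copy[0] truncated to 1 KiB; that
 * vector is then rewound to its remaining 2 KiB (iov_base advanced,
 * iov_len shrunk), and the next subdevice receives the remainder plus
 * vecs_copy[1] in a single writev call starting at offset zero.
 */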

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = subdev->read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (err == -EBADMSG) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (err == -EUCLEAN) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			/* advance by this subdevice's OOB count, not the
			   running total (matches concat_write_oob below) */
			devops.oobbuf += devops.oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = subdev->write_oob(subdev, to, &devops);
		ops->retlen += devops.retlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *) instr->priv);
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	err = mtd->erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}
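
/*
 * The handshake above turns the asynchronous subdevice erase into a
 * synchronous one: the caller sleeps on a stack-allocated wait queue
 * and concat_erase_callback() wakes it once erase->state reaches
 * MTD_ERASE_DONE or MTD_ERASE_FAILED. The state is tested only after
 * the task is on the queue and marked TASK_UNINTERRUPTIBLE, so a
 * wakeup racing with schedule() is not lost.
 */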

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	u_int32_t length, offset = 0;
	struct erase_info *erase;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (instr->addr > concat->mtd.size)
		return -EINVAL;

	if (instr->len + instr->addr > concat->mtd.size)
		return -EINVAL;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * Now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * Check that the ending offset is aligned to this
		 * region's erase size:
		 */
		if ((instr->addr + instr->len) & (erase_regions[i].erasesize -
						  1))
			return -EINVAL;
	}
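
	/*
	 * Alignment example: for a 1 MiB chip with 64 KiB blocks
	 * followed by a chip with 128 KiB blocks, erasing from 0xF0000
	 * to 0x140000 is accepted: 0xF0000 is 64 KiB-aligned and falls
	 * in the first region, while 0x140000 is 128 KiB-aligned and
	 * falls in the second.
	 */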

	instr->fail_addr = 0xffffffff;

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		if (!(subdev->flags & MTD_WRITEABLE)) {
			err = -EROFS;
			break;
		}
		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != 0xffffffff)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = subdev->lock(subdev, ofs, size);

		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = subdev->unlock(subdev, ofs, size);

		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->sync(subdev);
	}
}

static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = subdev->suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->resume(subdev);
	}
}

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!concat->subdev[0]->block_isbad)
		return res;

	if (ofs > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = subdev->block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if (!concat->subdev[0]->block_markbad)
		return 0;

	if (ofs > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = subdev->block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned upon success. This function does _not_ register any
 * devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices      */
				   char *name)
{				/* name for the new device   */
	int i;
	size_t size;
	struct mtd_concat *concat;
	u_int32_t max_erasesize, curr_erasesize;
	int num_erase_region;

	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk(KERN_ERR
		       "memory allocation error while creating concatenated device \"%s\"\n",
		       name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
	if (subdev[0]->writev)
		concat->mtd.writev = concat_writev;
	if (subdev[0]->read_oob)
		concat->mtd.read_oob = concat_read_oob;
	if (subdev[0]->write_oob)
		concat->mtd.write_oob = concat_write_oob;
	if (subdev[0]->block_isbad)
		concat->mtd.block_isbad = concat_block_isbad;
	if (subdev[0]->block_markbad)
		concat->mtd.block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk(KERN_ERR "Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->
			     flags) & ~MTD_WRITEABLE) {
				kfree(concat);
				printk(KERN_ERR "Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}
		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
			subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize   !=  subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize    !=  subdev[i]->oobsize ||
		    !concat->mtd.read_oob  != !subdev[i]->read_oob ||
		    !concat->mtd.write_oob != !subdev[i]->write_oob) {
			kfree(concat);
			printk(KERN_ERR "Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd.erase = concat_erase;
	concat->mtd.read = concat_read;
	concat->mtd.write = concat_write;
	concat->mtd.sync = concat_sync;
	concat->mtd.lock = concat_lock;
	concat->mtd.unlock = concat_unlock;
	concat->mtd.suspend = concat_suspend;
	concat->mtd.resume = concat_resume;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {
				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}
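
	/*
	 * Counting example: concatenating two uniform 64 KiB-block
	 * chips and then a 128 KiB-block chip yields num_erase_region
	 * == 2 (one change of erase size), whereas three identical
	 * chips yield 1 and the super device stays uniform.
	 */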

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		u_int32_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk(KERN_ERR
			       "memory allocation error while creating erase region list"
			       " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * the erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 *  fill in an mtd_erase_region_info structure for the area
					 *  we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					erase_region_p->numblocks =
					    (position - begin) / curr_erasesize;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						erase_region_p->numblocks =
						    (position -
						     begin) / curr_erasesize;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		erase_region_p->numblocks = (position - begin) / curr_erasesize;
	}

	return &concat->mtd;
}
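
/*
 * Usage sketch (hypothetical; chip0_mtd and chip1_mtd stand for
 * already-probed devices owned by a board driver):
 *
 *	struct mtd_info *parts[2] = { chip0_mtd, chip1_mtd };
 *	struct mtd_info *combined;
 *
 *	combined = mtd_concat_create(parts, 2, "combined-flash");
 *	if (combined)
 *		add_mtd_device(combined);
 *
 * and on teardown, unregister before destroying:
 *
 *	del_mtd_device(combined);
 *	mtd_concat_destroy(combined);
 */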

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */
void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");