/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
             <zyngier@ufr-info-p7.ibp.fr> or
             <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/module.h>
#include <linux/raid/raid0.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER
#define MD_PERSONALITY

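/*
 * Propagate an unplug request to every component device: RAID-0 has no
 * request queue of its own to run, so kicking the member queues is all
 * that is needed to get pending requests moving.
 */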
static void raid0_unplug(request_queue_t *q)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i;

	for (i=0; i<mddev->raid_disks; i++) {
		request_queue_t *r_queue = bdev_get_queue(devlist[i]->bdev);

		if (r_queue->unplug_fn)
			r_queue->unplug_fn(r_queue);
	}
}

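/*
 * Pass a cache flush down to each component device in zone 0 (which,
 * by construction, contains every device in the array).  The loop stops
 * at the first failure, or at the first device whose queue cannot
 * flush at all.
 */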
static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
			     sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i, ret = 0;

	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
		struct block_device *bdev = devlist[i]->bdev;
		request_queue_t *r_queue = bdev_get_queue(bdev);

		if (!r_queue->issue_flush_fn)
			ret = -EOPNOTSUPP;
		else
			ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
	}
	return ret;
}

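/*
 * The array is congested as soon as any component device is congested:
 * a striped request cannot make progress faster than its slowest member.
 */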
static int raid0_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i, ret = 0;

	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
		request_queue_t *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

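/*
 * Split the array into "strip zones".  Devices of equal size form one
 * zone; when devices differ in size, zone 0 stripes across all of them
 * up to the capacity of the smallest, the next zone stripes across the
 * devices that still have room, and so on.  Each additional distinct
 * device size therefore adds one zone.
 */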
static int create_strip_zones (mddev_t *mddev)
{
	int i, c, j;
	sector_t current_offset, curr_zone_offset;
	sector_t min_spacing;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
	struct list_head *tmp1, *tmp2;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];

	/*
	 * Count the number of distinct device sizes ('same size groups');
	 * each distinct size contributes one strip zone.
	 */
	conf->nr_strip_zones = 0;

	ITERATE_RDEV(mddev,rdev1,tmp1) {
		printk("raid0: looking at %s\n",
			bdevname(rdev1->bdev,b));
		c = 0;
		ITERATE_RDEV(mddev,rdev2,tmp2) {
			printk("raid0:   comparing %s(%llu)",
			       bdevname(rdev1->bdev,b),
			       (unsigned long long)rdev1->size);
			printk(" with %s(%llu)\n",
			       bdevname(rdev2->bdev,b),
			       (unsigned long long)rdev2->size);
			if (rdev2 == rdev1) {
				printk("raid0:   END\n");
				break;
			}
			if (rdev2->size == rdev1->size)
			{
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				printk("raid0:   EQUAL\n");
				c = 1;
				break;
			}
			printk("raid0:   NOT EQUAL\n");
		}
		if (!c) {
			printk("raid0:   ==> UNIQUE\n");
			conf->nr_strip_zones++;
			printk("raid0: %d zones\n", conf->nr_strip_zones);
		}
	}
	printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);

	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		return 1;
	conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		return 1;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	zone->dev = conf->devlist;
	ITERATE_RDEV(mddev, rdev1, tmp1) {
		int j = rdev1->raid_disk;

		if (j < 0 || j >= mddev->raid_disks) {
			printk("raid0: bad disk number %d - aborting!\n", j);
			goto abort;
		}
		if (zone->dev[j]) {
			printk("raid0: multiple devices for %d - aborting!\n",
				j);
			goto abort;
		}
		zone->dev[j] = rdev1;

		blk_queue_stack_limits(mddev->queue,
				       rdev1->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sectors to one PAGE, as
		 * a one-page request is never in violation.
		 */
		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		if (!smallest || (rdev1->size < smallest->size))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk("raid0: too few disks (%d of %d) - aborting!\n",
			cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->size = smallest->size * cnt;
	zone->zone_offset = 0;

	current_offset = smallest->size;
	curr_zone_offset = zone->size;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		zone = conf->strip_zone + i;
		zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;

		printk("raid0: zone %d\n", i);
		zone->dev_offset = current_offset;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			char b[BDEVNAME_SIZE];
			rdev = conf->strip_zone[0].dev[j];
			printk("raid0: checking %s ...", bdevname(rdev->bdev,b));
			if (rdev->size > current_offset)
			{
				printk(" contained as device %d\n", c);
				zone->dev[c] = rdev;
				c++;
				if (!smallest || (rdev->size < smallest->size)) {
					smallest = rdev;
					printk("  (%llu) is smallest!\n",
						(unsigned long long)rdev->size);
				}
			} else
				printk(" nope.\n");
		}

		zone->nb_dev = c;
		zone->size = (smallest->size - current_offset) * c;
		printk("raid0: zone->nb_dev: %d, size: %llu\n",
			zone->nb_dev, (unsigned long long)zone->size);

		zone->zone_offset = curr_zone_offset;
		curr_zone_offset += zone->size;

		current_offset = smallest->size;
		printk("raid0: current zone offset: %llu\n",
			(unsigned long long)current_offset);
	}

	/* Now find an appropriate hash spacing.
	 * We want a number which causes most hash entries to cover
	 * at most two strips, but the hash table must fit in at most
	 * 1 PAGE.  We choose the smallest strip, or contiguous collection
	 * of strips, that is big enough.  We never consider the last
	 * strip though, as its size has no bearing on the efficacy of
	 * the hash table.
	 */
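	/*
	 * Illustrative numbers (an assumption, not taken from the code):
	 * with 4 KiB pages and 8-byte pointers, PAGE_SIZE/sizeof(struct
	 * strip_zone *) is 512, so min_spacing is 1/512th of the total
	 * array size; any hash_spacing >= min_spacing keeps the table
	 * within one page.
	 */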
	conf->hash_spacing = curr_zone_offset;
	min_spacing = curr_zone_offset;
	sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
	for (i=0; i < conf->nr_strip_zones-1; i++) {
		sector_t sz = 0;
		for (j=i; j<conf->nr_strip_zones-1 &&
			     sz < min_spacing ; j++)
			sz += conf->strip_zone[j].size;
		if (sz >= min_spacing && sz < conf->hash_spacing)
			conf->hash_spacing = sz;
	}

	mddev->queue->unplug_fn = raid0_unplug;

	mddev->queue->issue_flush_fn = raid0_issue_flush;
	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	printk("raid0: done.\n");
	return 0;
 abort:
	return 1;
}

/**
 *	raid0_mergeable_bvec -- tell the bio layer whether two requests can be merged
 *	@q: request queue
 *	@bio: the bio that has been built up so far
 *	@biovec: the bio_vec that could be merged into it
 *
 *	Return the number of bytes we can accept at this offset.
 */
static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bio->bi_size >> 9;

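	/*
	 * The chunk size is a power of two (the mask below depends on
	 * that), so the offset within a chunk can be taken with '&'
	 * rather than a division.
	 */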
	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}

static int raid0_run (mddev_t *mddev)
{
	unsigned cur=0, i=0, nb_zone;
	s64 size;
	raid0_conf_t *conf;
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	if (mddev->chunk_size == 0) {
		printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
		return -EINVAL;
	}
	printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
	       mdname(mddev),
	       mddev->chunk_size >> 9,
	       (mddev->chunk_size>>1)-1);
	blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
	blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);

	conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
	if (!conf)
		goto out;
	mddev->private = (void *)conf;

	conf->strip_zone = NULL;
	conf->devlist = NULL;
	if (create_strip_zones (mddev))
		goto out_free_conf;

	/* calculate array device size */
	mddev->array_size = 0;
	ITERATE_RDEV(mddev,rdev,tmp)
		mddev->array_size += rdev->size;

	printk("raid0 : md_size is %llu blocks.\n",
		(unsigned long long)mddev->array_size);
	printk("raid0 : conf->hash_spacing is %llu blocks.\n",
		(unsigned long long)conf->hash_spacing);
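	/*
	 * sector_div() only accepts a 32-bit divisor.  Where sector_t is
	 * 64 bits, scale both the size and the spacing down by the same
	 * power of two (recorded in ->preshift) until the divisor fits
	 * in a u32; raid0_make_request applies the same shift before
	 * each hash lookup.
	 */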
	{
		sector_t s = mddev->array_size;
		sector_t space = conf->hash_spacing;
		int round;
		conf->preshift = 0;
		if (sizeof(sector_t) > sizeof(u32)) {
			/* shift down space and s so that sector_div will work */
			while (space > (sector_t) (~(u32)0)) {
				s >>= 1;
				space >>= 1;
				s += 1; /* force round-up */
				conf->preshift++;
			}
		}
		round = sector_div(s, (u32)space) ? 1 : 0;
		nb_zone = s + round;
	}
	printk("raid0 : nb_zone is %d.\n", nb_zone);

	printk("raid0 : Allocating %Zd bytes for hash.\n",
				nb_zone*sizeof(struct strip_zone*));
	conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
	if (!conf->hash_table)
		goto out_free_conf;
	size = conf->strip_zone[cur].size;

	conf->hash_table[0] = conf->strip_zone + cur;
	for (i=1; i< nb_zone; i++) {
		while (size <= conf->hash_spacing) {
			cur++;
			size += conf->strip_zone[cur].size;
		}
		size -= conf->hash_spacing;
		conf->hash_table[i] = conf->strip_zone + cur;
	}
	if (conf->preshift) {
		conf->hash_spacing >>= conf->preshift;
		/* round hash_spacing up so when we divide by it, we
		 * err on the side of too-low, which is safest
		 */
		conf->hash_spacing++;
	}

	/* Calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * read ahead at least twice a whole stripe, i.e. the number of
	 * devices multiplied by the chunk size, times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunk size should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	return 0;

out_free_conf:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
out:
	return -ENOMEM;
}

static int raid0_stop (mddev_t *mddev)
{
	raid0_conf_t *conf = mddev_to_conf(mddev);

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	kfree(conf->hash_table);
	conf->hash_table = NULL;
	kfree(conf->strip_zone);
	conf->strip_zone = NULL;
	kfree(conf);
	mddev->private = NULL;

	return 0;
}

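/*
 * Map an incoming bio onto the right component device.  Barriers are
 * not supported; bios that straddle a chunk boundary are split and
 * resubmitted.  Returning 1 hands the remapped bio back to the block
 * layer for submission, which resolves recursion for us.
 */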
static int raid0_make_request (request_queue_t *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;
	sector_t chunk;
	sector_t block, rsect;
	const int rw = bio_data_dir(bio);

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
		return 0;
	}

	disk_stat_inc(mddev->gendisk, ios[rw]);
	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));

	chunk_size = mddev->chunk_size >> 10;
	chunk_sects = mddev->chunk_size >> 9;
	chunksize_bits = ffz(~chunk_size);
	block = bio->bi_sector >> 1;

	if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this from happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one-page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		bp = bio_split(bio, bio_split_pool, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
		if (raid0_make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}

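	/*
	 * The hash lookup gets us near the right zone; the spacing was
	 * chosen so that most entries span no more than two zones, so
	 * only a short linear scan is needed afterwards.
	 */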
	{
		sector_t x = block >> conf->preshift;
		sector_div(x, (u32)conf->hash_spacing);
		zone = conf->hash_table[x];
	}

	while (block >= (zone->zone_offset + zone->size))
		zone++;

	sect_in_chunk = bio->bi_sector & ((chunk_size<<1) -1);

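	/*
	 * Worked example with purely illustrative numbers (not taken
	 * from any real array): zone 0 of 3 devices, 64 KiB chunks
	 * (chunk_size = 64, chunksize_bits = 6, chunk_sects = 128), bio
	 * at sector 1000.  Then block = 500, sect_in_chunk = 1000 & 127
	 * = 104, chunk index = 500 >> 6 = 7; 7 % 3 = 1 selects device 1,
	 * 7 / 3 = 2 is the chunk on that device, so
	 * rsect = ((2 << 6) << 1) + 104 = 360.
	 */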
	{
		sector_t x = (block - zone->zone_offset) >> chunksize_bits;

		sector_div(x, zone->nb_dev);
		chunk = x;

		x = block >> chunksize_bits;
		tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
	}
	rsect = (((chunk << chunksize_bits) + zone->dev_offset)<<1)
		+ sect_in_chunk;

	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = rsect + tmp_dev->data_offset;

	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk("raid0_make_request bug: can't convert block across chunks"
		" or bigger than %dk %llu %d\n", chunk_size,
		(unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio, bio->bi_size);
	return 0;
}

static void raid0_status (struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
	int j, k, h;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev_to_conf(mddev);

	h = 0;
	for (j = 0; j < conf->nr_strip_zones; j++) {
		seq_printf(seq, "      z%d", j);
		if (conf->hash_table[h] == conf->strip_zone+j)
			seq_printf(seq, "(h%d)", h++);
		seq_printf(seq, "=[");
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			seq_printf(seq, "%s/", bdevname(
				conf->strip_zone[j].dev[k]->bdev,b));

		seq_printf(seq, "] zo=%d do=%d s=%d\n",
				conf->strip_zone[j].zone_offset,
				conf->strip_zone[j].dev_offset,
				conf->strip_zone[j].size);
	}
#endif
	seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
	return;
}

static struct mdk_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");