Lines Matching refs:mddev in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/md/raid0.c

30 mddev_t *mddev = q->queuedata;
31 raid0_conf_t *conf = mddev->private;
45 mddev_t *mddev = data;
46 raid0_conf_t *conf = mddev->private;
51 if (mddev_congested(mddev, bits))
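
The matches at 45-51 are the congestion callback wired up at line 268. A sketch of how they fit together, assuming the stock 2.6.36 raid0.c (the unmatched lines are reconstructed and may differ from the vendor tree):

	static int raid0_congested(void *data, int bits)
	{
		mddev_t *mddev = data;
		raid0_conf_t *conf = mddev->private;
		mdk_rdev_t **devlist = conf->devlist;
		int i, ret = 0;

		if (mddev_congested(mddev, bits))
			return 1;
		/* the array is congested if any member queue is congested */
		for (i = 0; i < mddev->raid_disks && !ret; i++) {
			struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}
		return ret;
	}
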
65 static void dump_zones(mddev_t *mddev)
71 raid0_conf_t *conf = mddev->private;
74 mdname(mddev));
95 static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
107 list_for_each_entry(rdev1, &mddev->disks, same_set) {
109 mdname(mddev),
115 sector_div(sectors, mddev->chunk_sectors);
116 rdev1->sectors = sectors * mddev->chunk_sectors;
118 list_for_each_entry(rdev2, &mddev->disks, same_set) {
120 mdname(mddev),
128 mdname(mddev));
137 mdname(mddev));
142 mdname(mddev));
146 mdname(mddev));
149 mdname(mddev), conf->nr_strip_zones);
153 mdname(mddev), conf->nr_strip_zones);
160 conf->nr_strip_zones*mddev->raid_disks,
173 list_for_each_entry(rdev1, &mddev->disks, same_set) {
176 if (mddev->level == 10) {
182 if (j < 0 || j >= mddev->raid_disks) {
184 "aborting!\n", mdname(mddev), j);
189 "aborting!\n", mdname(mddev), j);
194 disk_stack_limits(mddev->gendisk, rdev1->bdev,
202 blk_queue_max_segments(mddev->queue, 1);
203 blk_queue_segment_boundary(mddev->queue,
210 if (cnt != mddev->raid_disks) {
212 "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
226 dev = conf->devlist + i * mddev->raid_disks;
229 mdname(mddev), i);
237 mdname(mddev),
249 mdname(mddev),
257 mdname(mddev),
264 mdname(mddev),
267 mddev->queue->unplug_fn = raid0_unplug;
268 mddev->queue->backing_dev_info.congested_fn = raid0_congested;
269 mddev->queue->backing_dev_info.congested_data = mddev;
275 if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
277 mdname(mddev),
278 mddev->chunk_sectors << 9);
282 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
283 blk_queue_io_opt(mddev->queue,
284 (mddev->chunk_sectors << 9) * mddev->raid_disks);
286 printk(KERN_INFO "md/raid0:%s: done.\n", mdname(mddev));
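
Lines 275-284 reject chunk sizes that are not a multiple of the member queues' logical block size, then export the geometry to the block layer: io_min is one chunk and io_opt is one full stripe. With illustrative numbers, chunk_sectors = 128 (64 KiB) on 4 member disks gives io_min = 128 << 9 = 65536 bytes and io_opt = 65536 * 4 = 262144 bytes.
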
310 mddev_t *mddev = q->queuedata;
313 unsigned int chunk_sectors = mddev->chunk_sectors;
329 static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
337 list_for_each_entry(rdev, &mddev->disks, same_set)
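
raid0_size (329-337) reports the array capacity as the plain sum of the members' chunk-rounded sizes; RAID0 has no parity overhead. A sketch assuming the stock 2.6.36 source:

	static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
	{
		sector_t array_sectors = 0;
		mdk_rdev_t *rdev;

		/* the sectors/raid_disks arguments are unused: raid0 does not
		 * support generic reshape, so the current geometry is reported */
		list_for_each_entry(rdev, &mddev->disks, same_set)
			array_sectors += rdev->sectors;

		return array_sectors;
	}
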
343 static int raid0_run(mddev_t *mddev)
348 if (mddev->chunk_sectors == 0) {
350 mdname(mddev));
353 if (md_check_no_bitmap(mddev))
355 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
356 mddev->queue->queue_lock = &mddev->queue->__queue_lock;
359 if (mddev->private == NULL) {
360 ret = create_strip_zones(mddev, &conf);
363 mddev->private = conf;
365 conf = mddev->private;
368 md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
371 mdname(mddev),
372 (unsigned long long)mddev->array_sectors);
383 int stripe = mddev->raid_disks *
384 (mddev->chunk_sectors << 9) / PAGE_SIZE;
385 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
386 mddev->queue->backing_dev_info.ra_pages = 2* stripe;
389 blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
390 dump_zones(mddev);
391 md_integrity_register(mddev);
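
Lines 383-386 size the readahead window to cover two full stripes so sequential readers keep every member busy. With illustrative numbers: 4 disks, chunk_sectors = 128 (64 KiB) and PAGE_SIZE = 4096 give stripe = 4 * 65536 / 4096 = 64 pages, so ra_pages is raised to at least 128 pages (512 KiB).
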
395 static int raid0_stop(mddev_t *mddev)
397 raid0_conf_t *conf = mddev->private;
399 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
403 mddev->private = NULL;
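
raid0_stop (395-403) quiesces the queue before freeing the configuration, since the unplug callback dereferences it. A sketch, with the unmatched lines filled in on the assumption of the stock 2.6.36 source:

	static int raid0_stop(mddev_t *mddev)
	{
		raid0_conf_t *conf = mddev->private;

		blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
		kfree(conf->strip_zone);
		kfree(conf->devlist);
		kfree(conf);
		mddev->private = NULL;
		return 0;
	}
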
430 static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
435 raid0_conf_t *conf = mddev->private;
437 unsigned int chunk_sects = mddev->chunk_sectors;
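
map_sector (430-437) turns an array sector into a member device plus an offset on it. The core arithmetic, simplified here to the single-zone case with hypothetical variable names (not the vendor code verbatim; sector_div() divides its 64-bit first argument in place and returns the remainder):

	sector_t chunk = sector;
	unsigned int in_chunk = sector_div(chunk, chunk_sects); /* offset within chunk */
	unsigned int disk = sector_div(chunk, zone->nb_dev);    /* member disk; chunk is
	                                                           now the per-disk index */
	sector_t dev_sector = chunk * chunk_sects + in_chunk;   /* sector on that disk */
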
466 static inline int is_io_in_chunk_boundary(mddev_t *mddev,
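
The helper at 466 decides whether a bio fits inside one chunk, which gates the split path below. A reconstruction assuming the stock 2.6.36 source:

	static inline int is_io_in_chunk_boundary(mddev_t *mddev,
			unsigned int chunk_sects, struct bio *bio)
	{
		if (likely(is_power_of_2(chunk_sects))) {
			/* offset within the chunk plus length must not exceed it */
			return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
						+ (bio->bi_size >> 9));
		} else {
			sector_t sector = bio->bi_sector;
			return chunk_sects >= (sector_div(sector, chunk_sects)
						+ (bio->bi_size >> 9));
		}
	}

When the test fails, lines 508-510 show the bio being split in two halves, each resubmitted through raid0_make_request.
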
479 static int raid0_make_request(mddev_t *mddev, struct bio *bio)
487 md_barrier_request(mddev, bio);
491 chunk_sects = mddev->chunk_sectors;
492 if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
508 if (raid0_make_request(mddev, &bp->bio1))
510 if (raid0_make_request(mddev, &bp->bio2))
518 zone = find_zone(mddev->private, &sector_offset);
519 tmp_dev = map_sector(mddev, zone, bio->bi_sector,
532 mdname(mddev), chunk_sects / 2,
539 static void raid0_status(struct seq_file *seq, mddev_t *mddev)
545 raid0_conf_t *conf = mddev->private;
568 seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
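
At 568 the chunk size is printed in KiB: sectors are 512 bytes, so chunk_sectors / 2 converts sectors to kilobytes (e.g. chunk_sectors = 128 prints "64k chunks").
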
572 static void *raid0_takeover_raid45(mddev_t *mddev)
577 if (mddev->degraded != 1) {
579 mdname(mddev),
580 mddev->degraded);
584 list_for_each_entry(rdev, &mddev->disks, same_set) {
586 if (rdev->raid_disk == mddev->raid_disks-1) {
588 mdname(mddev));
594 mddev->new_level = 0;
595 mddev->new_layout = 0;
596 mddev->new_chunk_sectors = mddev->chunk_sectors;
597 mddev->raid_disks--;
598 mddev->delta_disks = -1;
600 mddev->recovery_cp = MaxSector;
602 create_strip_zones(mddev, &priv_conf);
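
raid0_takeover_raid45 (572-602) only accepts an array degraded by exactly one disk, and that missing disk must occupy the last slot, i.e. the dedicated parity disk of the ALGORITHM_PARITY_N layout. Illustrative case: a 4-disk raid4 with the parity disk failed becomes a 3-disk raid0 (raid_disks-- and delta_disks = -1), and recovery_cp is set to MaxSector since no resync is needed.
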
606 static void *raid0_takeover_raid10(mddev_t *mddev)
616 if (mddev->layout != ((1 << 8) + 2)) {
618 mdname(mddev),
619 mddev->layout);
622 if (mddev->raid_disks & 1) {
624 mdname(mddev));
627 if (mddev->degraded != (mddev->raid_disks>>1)) {
629 mdname(mddev));
634 mddev->new_level = 0;
635 mddev->new_layout = 0;
636 mddev->new_chunk_sectors = mddev->chunk_sectors;
637 mddev->delta_disks = - mddev->raid_disks / 2;
638 mddev->raid_disks += mddev->delta_disks;
639 mddev->degraded = 0;
641 mddev->recovery_cp = MaxSector;
643 create_strip_zones(mddev, &priv_conf);
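
raid0_takeover_raid10 (606-643) requires the near-2 layout: (1 << 8) + 2 = 0x102 encodes far-copies = 1 in the high byte and near-copies = 2 in the low byte. It also needs an even disk count with exactly half the disks missing, one from each mirror pair. Worked example: a 4-disk raid10,n2 degraded to 2 active disks takes delta_disks = -4 / 2 = -2, leaving a 2-disk raid0.
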
647 static void *raid0_takeover(mddev_t *mddev)
654 if (mddev->level == 4)
655 return raid0_takeover_raid45(mddev);
657 if (mddev->level == 5) {
658 if (mddev->layout == ALGORITHM_PARITY_N)
659 return raid0_takeover_raid45(mddev);
662 mdname(mddev), ALGORITHM_PARITY_N);
665 if (mddev->level == 10)
666 return raid0_takeover_raid10(mddev);
671 static void raid0_quiesce(mddev_t *mddev, int state)
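
raid0_quiesce (671) has nothing to wait for: raid0 keeps no internal write state. Assuming the stock 2.6.36 source, the body is empty:

	static void raid0_quiesce(mddev_t *mddev, int state)
	{
	}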