Search restricted to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/md/

Lines Matching refs:mddev
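
Nearly every hit below falls into one of two patterns: personality code reaching the array through the back-pointer conf->mddev (set once in setup_conf(), see line 4890), or md core entry points recovering the personality state with conf = mddev->private (e.g. lines 1694 and 4879). The fragment below is a minimal illustrative sketch of that relationship, using simplified stand-in struct definitions and a hypothetical helper (sketch_is_failed) rather than the real kernel declarations, which carry many more fields.

/*
 * Simplified stand-ins, NOT the kernel definitions: mddev_t describes the
 * whole md array, raid5_conf_t is the RAID4/5/6 personality state stored
 * in mddev->private, and conf->mddev points back at the array.
 */
struct mddev_s;                          /* stand-in for mddev_t */

typedef struct raid5_conf {
	struct mddev_s *mddev;           /* back-pointer to the array (line 4890) */
	int level;                       /* 4, 5 or 6 */
	int raid_disks;
} raid5_conf_t;

typedef struct mddev_s {
	void *private;                   /* holds the raid5_conf_t */
	int degraded;                    /* failed member count */
} mddev_t;

/* Hypothetical helper showing the access pattern seen throughout the listing. */
static int sketch_is_failed(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;   /* as at lines 1694, 3693, 4524 */
	return conf->mddev->degraded > 0;      /* as at lines 386-387 */
}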

213 md_wakeup_thread(conf->mddev->thread);
219 md_wakeup_thread(conf->mddev->thread);
226 md_wakeup_thread(conf->mddev->thread);
386 if (conf->mddev->reshape_position == MaxSector)
387 return conf->mddev->degraded > conf->max_degraded;
436 static void unplug_slaves(mddev_t *mddev);
1339 if (conf->mddev->gendisk)
1341 "raid%d-%s", conf->level, mdname(conf->mddev));
1344 "raid%d-%p", conf->level, conf->mddev);
1419 err = md_allow_write(conf->mddev);
1464 unplug_slaves(conf->mddev)
1586 mdname(conf->mddev), STRIPE_SECTORS,
1602 if (conf->mddev->degraded >= conf->max_degraded)
1606 mdname(conf->mddev),
1615 mdname(conf->mddev),
1623 mdname(conf->mddev), bdn);
1631 md_error(conf->mddev, rdev);
1634 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1660 md_error(conf->mddev, conf->disks[i].rdev);
1662 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1691 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1694 raid5_conf_t *conf = mddev->private;
1698 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1702 mddev->degraded++;
1707 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1714 mdname(mddev),
1716 mdname(mddev),
1717 conf->raid_disks - mddev->degraded);
2043 mdname(conf->mddev));
2176 if (conf->mddev->bitmap && firstwrite) {
2177 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
2239 md_error(conf->mddev, rdev);
2259 md_write_end(conf->mddev);
2274 md_write_end(conf->mddev);
2306 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2312 md_wakeup_thread(conf->mddev->thread);
2517 md_write_end(conf->mddev);
2527 bitmap_endwrite(conf->mddev->bitmap,
2537 md_wakeup_thread(conf->mddev->thread);
2738 conf->mddev->resync_mismatches += STRIPE_SECTORS;
2739 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2890 conf->mddev->resync_mismatches += STRIPE_SECTORS;
2891 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
3104 rdev_dec_pending(blocked_rdev, conf->mddev);
3123 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
3199 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
3206 if (s.failed == 1 && !conf->mddev->ro &&
3263 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3275 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
3290 md_wakeup_thread(conf->mddev->thread);
3396 rdev_dec_pending(blocked_rdev, conf->mddev);
3415 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
3498 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
3505 if (s.failed <= 2 && !conf->mddev->ro)
3564 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3576 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
3592 md_wakeup_thread(conf->mddev->thread);
3637 static void unplug_slaves(mddev_t *mddev)
3639 raid5_conf_t *conf = mddev->private;
3654 rdev_dec_pending(rdev, mddev);
3671 md_wakeup_thread(conf->mddev->thread);
3675 unplug_slaves(conf->mddev);
3687 mddev_t *mddev = q->queuedata;
3688 md_raid5_unplug_device(mddev->private);
3691 int md_raid5_congested(mddev_t *mddev, int bits)
3693 raid5_conf_t *conf = mddev->private;
3712 mddev_t *mddev = data;
3714 return mddev_congested(mddev, bits) ||
3715 md_raid5_congested(mddev, bits);
3725 mddev_t *mddev = q->queuedata;
3728 unsigned int chunk_sectors = mddev->chunk_sectors;
3734 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3735 chunk_sectors = mddev->new_chunk_sectors;
3745 static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
3748 unsigned int chunk_sectors = mddev->chunk_sectors;
3751 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3752 chunk_sectors = mddev->new_chunk_sectors;
3771 md_wakeup_thread(conf->mddev->thread);
3808 mddev_t *mddev;
3817 mddev = rdev->mddev;
3818 conf = mddev->private;
3820 rdev_dec_pending(rdev, conf->mddev);
3855 static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
3857 raid5_conf_t *conf = mddev->private;
3862 if (!in_chunk_boundary(mddev, raid_bio)) {
3898 rdev_dec_pending(rdev, mddev);
3971 static int make_request(mddev_t *mddev, struct bio * bi)
3973 raid5_conf_t *conf = mddev->private;
3986 mddev->pers->quiesce(mddev, 1);
3987 mddev->pers->quiesce(mddev, 0);
3988 md_barrier_request(mddev, bi);
3992 md_write_start(mddev, bi);
3995 mddev->reshape_position == MaxSector &&
3996 chunk_aligned_read(mddev,bi))
4023 if (mddev->delta_disks < 0
4029 if (mddev->delta_disks < 0
4062 if (mddev->delta_disks < 0
4076 logical_sector >= mddev->suspend_lo &&
4077 logical_sector < mddev->suspend_hi) {
4086 if (logical_sector >= mddev->suspend_lo &&
4087 logical_sector < mddev->suspend_hi)
4106 if (mddev->barrier &&
4124 md_write_end(mddev);
4129 if (mddev->barrier) {
4133 wait_event(mddev->thread->wqueue,
4139 static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);
4141 static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
4152 raid5_conf_t *conf = mddev->private;
4167 if (mddev->delta_disks < 0 &&
4168 conf->reshape_progress < raid5_size(mddev, 0, 0)) {
4169 sector_nr = raid5_size(mddev, 0, 0)
4171 } else if (mddev->delta_disks >= 0 &&
4176 mddev->curr_resync_completed = sector_nr;
4177 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4187 if (mddev->new_chunk_sectors > mddev->chunk_sectors)
4188 reshape_sectors = mddev->new_chunk_sectors;
4190 reshape_sectors = mddev->chunk_sectors;
4206 if (mddev->delta_disks < 0) {
4233 if ((mddev->delta_disks < 0
4240 mddev->reshape_position = conf->reshape_progress;
4241 mddev->curr_resync_completed = mddev->curr_resync;
4243 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4244 md_wakeup_thread(mddev->thread);
4245 wait_event(mddev->sb_wait, mddev->flags == 0 ||
4248 conf->reshape_safe = mddev->reshape_position;
4251 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4254 if (mddev->delta_disks < 0) {
4257 BUG_ON((mddev->dev_sectors &
4283 if (s < raid5_size(mddev, 0, 0)) {
4298 if (mddev->delta_disks < 0)
4315 if (last_sector >= mddev->dev_sectors)
4316 last_sector = mddev->dev_sectors - 1;
4336 if ((sector_nr - mddev->curr_resync_completed) * 2
4337 >= mddev->resync_max - mddev->curr_resync_completed) {
4341 mddev->reshape_position = conf->reshape_progress;
4342 mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors;
4344 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4345 md_wakeup_thread(mddev->thread);
4346 wait_event(mddev->sb_wait,
4347 !test_bit(MD_CHANGE_DEVS, &mddev->flags)
4350 conf->reshape_safe = mddev->reshape_position;
4353 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4358 static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
4360 raid5_conf_t *conf = mddev->private;
4362 sector_t max_sector = mddev->dev_sectors;
4369 unplug_slaves(mddev);
4371 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
4376 if (mddev->curr_resync < max_sector) /* aborted */
4377 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
4381 bitmap_close_sync(mddev->bitmap);
4389 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4390 return reshape_request(mddev, sector_nr, skipped);
4402 if (mddev->degraded >= conf->max_degraded &&
4403 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
4404 sector_t rv = mddev->dev_sectors - sector_nr;
4408 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
4409 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
4418 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
4436 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
4521 static void raid5d(mddev_t *mddev)
4524 raid5_conf_t *conf = mddev->private;
4529 md_check_recovery(mddev);
4539 bitmap_unplug(mddev->bitmap);
4573 unplug_slaves(mddev);
4579 raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
4581 raid5_conf_t *conf = mddev->private;
4589 raid5_set_cache_size(mddev_t *mddev, int size)
4591 raid5_conf_t *conf = mddev->private;
4602 err = md_allow_write(mddev);
4615 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
4617 raid5_conf_t *conf = mddev->private;
4628 err = raid5_set_cache_size(mddev, new);
4640 raid5_show_preread_threshold(mddev_t *mddev, char *page)
4642 raid5_conf_t *conf = mddev->private;
4650 raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
4652 raid5_conf_t *conf = mddev->private;
4674 stripe_cache_active_show(mddev_t *mddev, char *page)
4676 raid5_conf_t *conf = mddev->private;
4698 raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4700 raid5_conf_t *conf = mddev->private;
4703 sectors = mddev->dev_sectors;
4708 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4709 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
4825 static raid5_conf_t *setup_conf(mddev_t *mddev)
4832 if (mddev->new_level != 5
4833 && mddev->new_level != 4
4834 && mddev->new_level != 6) {
4836 mdname(mddev), mddev->new_level);
4839 if ((mddev->new_level == 5
4840 && !algorithm_valid_raid5(mddev->new_layout)) ||
4841 (mddev->new_level == 6
4842 && !algorithm_valid_raid6(mddev->new_layout))) {
4844 mdname(mddev), mddev->new_layout);
4847 if (mddev->new_level == 6 && mddev->raid_disks < 4) {
4849 mdname(mddev), mddev->raid_disks);
4853 if (!mddev->new_chunk_sectors ||
4854 (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
4855 !is_power_of_2(mddev->new_chunk_sectors)) {
4857 mdname(mddev), mddev->new_chunk_sectors << 9);
4877 conf->raid_disks = mddev->raid_disks;
4878 if (mddev->reshape_position == MaxSector)
4879 conf->previous_raid_disks = mddev->raid_disks;
4881 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
4890 conf->mddev = mddev;
4895 conf->level = mddev->new_level;
4899 pr_debug("raid456: run(%s) called.\n", mdname(mddev));
4901 list_for_each_entry(rdev, &mddev->disks, same_set) {
4914 mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
4920 conf->chunk_sectors = mddev->new_chunk_sectors;
4921 conf->level = mddev->new_level;
4926 conf->algorithm = mddev->new_layout;
4928 conf->reshape_progress = mddev->reshape_position;
4930 conf->prev_chunk_sectors = mddev->chunk_sectors;
4931 conf->prev_algo = mddev->layout;
4939 mdname(mddev), memory);
4943 mdname(mddev), memory);
4945 conf->thread = md_register_thread(raid5d, mddev, NULL);
4949 mdname(mddev));
4990 static int run(mddev_t *mddev)
4998 if (mddev->recovery_cp != MaxSector)
5001 mdname(mddev));
5002 if (mddev->reshape_position != MaxSector) {
5010 int max_degraded = (mddev->level == 6 ? 2 : 1);
5012 if (mddev->new_level != mddev->level) {
5015 mdname(mddev));
5018 old_disks = mddev->raid_disks - mddev->delta_disks;
5023 here_new = mddev->reshape_position;
5024 if (sector_div(here_new, mddev->new_chunk_sectors *
5025 (mddev->raid_disks - max_degraded))) {
5027 "on a stripe boundary\n", mdname(mddev));
5030 reshape_offset = here_new * mddev->new_chunk_sectors;
5032 here_old = mddev->reshape_position;
5033 sector_div(here_old, mddev->chunk_sectors *
5037 if (mddev->delta_disks == 0) {
5045 if ((here_new * mddev->new_chunk_sectors !=
5046 here_old * mddev->chunk_sectors) ||
5047 mddev->ro == 0) {
5050 mdname(mddev));
5053 } else if (mddev->delta_disks < 0
5054 ? (here_new * mddev->new_chunk_sectors <=
5055 here_old * mddev->chunk_sectors)
5056 : (here_new * mddev->new_chunk_sectors >=
5057 here_old * mddev->chunk_sectors)) {
5061 mdname(mddev));
5065 mdname(mddev));
5068 BUG_ON(mddev->level != mddev->new_level);
5069 BUG_ON(mddev->layout != mddev->new_layout);
5070 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
5071 BUG_ON(mddev->delta_disks != 0);
5074 if (mddev->private == NULL)
5075 conf = setup_conf(mddev);
5077 conf = mddev->private;
5082 mddev->thread = conf->thread;
5084 mddev->private = conf;
5089 list_for_each_entry(rdev, &mddev->disks, same_set) {
5105 if (mddev->major_version == 0 &&
5106 mddev->minor_version > 90)
5125 mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
5131 mdname(mddev), mddev->degraded, conf->raid_disks);
5136 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
5137 mddev->resync_max_sectors = mddev->dev_sectors;
5139 if (mddev->degraded > dirty_parity_disks &&
5140 mddev->recovery_cp != MaxSector) {
5141 if (mddev->ok_start_degraded)
5145 mdname(mddev));
5149 mdname(mddev));
5154 if (mddev->degraded == 0)
5156 " devices, algorithm %d\n", mdname(mddev), conf->level,
5157 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
5158 mddev->new_layout);
5162 mdname(mddev), conf->level,
5163 mddev->raid_disks - mddev->degraded,
5164 mddev->raid_disks, mddev->new_layout);
5171 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5172 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5173 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5174 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5175 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5181 if (mddev->to_remove == &raid5_attrs_group)
5182 mddev->to_remove = NULL;
5183 else if (mddev->kobj.sd &&
5184 sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
5187 mdname(mddev));
5188 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5191 mddev->plug = &conf->plug;
5192 if (mddev->queue) {
5200 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
5201 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
5202 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
5204 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
5206 mddev->queue->backing_dev_info.congested_data = mddev;
5207 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
5208 mddev->queue->queue_lock = &conf->device_lock;
5209 mddev->queue->unplug_fn = raid5_unplug_queue;
5211 chunk_size = mddev->chunk_sectors << 9;
5212 blk_queue_io_min(mddev->queue, chunk_size);
5213 blk_queue_io_opt(mddev->queue, chunk_size *
5216 list_for_each_entry(rdev, &mddev->disks, same_set)
5217 disk_stack_limits(mddev->gendisk, rdev->bdev,
5223 md_unregister_thread(mddev->thread);
5224 mddev->thread = NULL;
5229 mddev->private = NULL;
5230 printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
5234 static int stop(mddev_t *mddev)
5236 raid5_conf_t *conf = mddev->private;
5238 md_unregister_thread(mddev->thread);
5239 mddev->thread = NULL;
5240 if (mddev->queue)
5241 mddev->queue->backing_dev_info.congested_fn = NULL;
5244 mddev->private = NULL;
5245 mddev->to_remove = &raid5_attrs_group;
5284 static void status(struct seq_file *seq, mddev_t *mddev)
5286 raid5_conf_t *conf = mddev->private;
5289 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
5290 mddev->chunk_sectors / 2, mddev->layout);
5291 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
5315 conf->raid_disks - conf->mddev->degraded);
5327 static int raid5_spare_active(mddev_t *mddev)
5330 raid5_conf_t *conf = mddev->private;
5346 mddev->degraded -= count;
5352 static int raid5_remove_disk(mddev_t *mddev, int number)
5354 raid5_conf_t *conf = mddev->private;
5394 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
5396 raid5_conf_t *conf = mddev->private;
5434 static int raid5_resize(mddev_t *mddev, sector_t sectors)
5443 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
5444 md_set_array_sectors(mddev, raid5_size(mddev, sectors,
5445 mddev->raid_disks));
5446 if (mddev->array_sectors >
5447 raid5_size(mddev, sectors, mddev->raid_disks))
5449 set_capacity(mddev->gendisk, mddev->array_sectors);
5450 revalidate_disk(mddev->gendisk);
5451 if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
5452 mddev->recovery_cp = mddev->dev_sectors;
5453 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5455 mddev->dev_sectors = sectors;
5456 mddev->resync_max_sectors = sectors;
5460 static int check_stripe_cache(mddev_t *mddev)
5470 raid5_conf_t *conf = mddev->private;
5471 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
5473 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
5476 mdname(mddev),
5477 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
5484 static int check_reshape(mddev_t *mddev)
5486 raid5_conf_t *conf = mddev->private;
5488 if (mddev->delta_disks == 0 &&
5489 mddev->new_layout == mddev->layout &&
5490 mddev->new_chunk_sectors == mddev->chunk_sectors)
5492 if (mddev->bitmap)
5497 if (mddev->delta_disks < 0) {
5504 if (mddev->level == 6)
5506 if (mddev->raid_disks + mddev->delta_disks < min)
5510 if (!check_stripe_cache(mddev))
5513 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
5516 static int raid5_start_reshape(mddev_t *mddev)
5518 raid5_conf_t *conf = mddev->private;
5524 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5527 if (!check_stripe_cache(mddev))
5530 list_for_each_entry(rdev, &mddev->disks, same_set)
5535 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
5545 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
5546 < mddev->array_sectors) {
5548 "before number of disks\n", mdname(mddev));
5555 conf->raid_disks += mddev->delta_disks;
5557 conf->chunk_sectors = mddev->new_chunk_sectors;
5559 conf->algorithm = mddev->new_layout;
5560 if (mddev->delta_disks < 0)
5561 conf->reshape_progress = raid5_size(mddev, 0, 0);
5575 if (mddev->delta_disks >= 0)
5576 list_for_each_entry(rdev, &mddev->disks, same_set)
5579 if (raid5_add_disk(mddev, rdev) == 0) {
5587 if (sysfs_create_link(&mddev->kobj,
5597 if (mddev->delta_disks > 0) {
5599 mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
5603 mddev->raid_disks = conf->raid_disks;
5604 mddev->reshape_position = conf->reshape_progress;
5605 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5607 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5608 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5609 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5610 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5611 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5613 if (!mddev->sync_thread) {
5614 mddev->recovery = 0;
5616 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
5622 md_wakeup_thread(mddev->sync_thread);
5623 md_new_event(mddev);
5633 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
5644 if (conf->mddev->queue) {
5648 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
5649 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
5657 static void raid5_finish_reshape(mddev_t *mddev)
5659 raid5_conf_t *conf = mddev->private;
5661 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5663 if (mddev->delta_disks > 0) {
5664 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5665 set_capacity(mddev->gendisk, mddev->array_sectors);
5666 revalidate_disk(mddev->gendisk);
5669 mddev->degraded = conf->raid_disks;
5674 mddev->degraded--;
5676 d < conf->raid_disks - mddev->delta_disks;
5679 if (rdev && raid5_remove_disk(mddev, d) == 0) {
5682 sysfs_remove_link(&mddev->kobj, nm);
5687 mddev->layout = conf->algorithm;
5688 mddev->chunk_sectors = conf->chunk_sectors;
5689 mddev->reshape_position = MaxSector;
5690 mddev->delta_disks = 0;
5694 static void raid5_quiesce(mddev_t *mddev, int state)
5696 raid5_conf_t *conf = mddev->private;
5730 static void *raid45_takeover_raid0(mddev_t *mddev, int level)
5732 struct raid0_private_data *raid0_priv = mddev->private;
5737 mdname(mddev));
5741 mddev->new_level = level;
5742 mddev->new_layout = ALGORITHM_PARITY_N;
5743 mddev->new_chunk_sectors = mddev->chunk_sectors;
5744 mddev->raid_disks += 1;
5745 mddev->delta_disks = 1;
5747 mddev->recovery_cp = MaxSector;
5749 return setup_conf(mddev);
5753 static void *raid5_takeover_raid1(mddev_t *mddev)
5757 if (mddev->raid_disks != 2 ||
5758 mddev->degraded > 1)
5766 while (chunksect && (mddev->array_sectors & (chunksect-1)))
5773 mddev->new_level = 5;
5774 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
5775 mddev->new_chunk_sectors = chunksect;
5777 return setup_conf(mddev);
5780 static void *raid5_takeover_raid6(mddev_t *mddev)
5784 switch (mddev->layout) {
5806 mddev->new_level = 5;
5807 mddev->new_layout = new_layout;
5808 mddev->delta_disks = -1;
5809 mddev->raid_disks -= 1;
5810 return setup_conf(mddev);
5814 static int raid5_check_reshape(mddev_t *mddev)
5821 raid5_conf_t *conf = mddev->private;
5822 int new_chunk = mddev->new_chunk_sectors;
5824 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
5831 if (mddev->array_sectors & (new_chunk-1))
5838 if (mddev->raid_disks == 2) {
5840 if (mddev->new_layout >= 0) {
5841 conf->algorithm = mddev->new_layout;
5842 mddev->layout = mddev->new_layout;
5846 mddev->chunk_sectors = new_chunk;
5848 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5849 md_wakeup_thread(mddev->thread);
5851 return check_reshape(mddev);
5854 static int raid6_check_reshape(mddev_t *mddev)
5856 int new_chunk = mddev->new_chunk_sectors;
5858 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
5865 if (mddev->array_sectors & (new_chunk-1))
5871 return check_reshape(mddev);
5874 static void *raid5_takeover(mddev_t *mddev)
5882 if (mddev->level == 0)
5883 return raid45_takeover_raid0(mddev, 5);
5884 if (mddev->level == 1)
5885 return raid5_takeover_raid1(mddev);
5886 if (mddev->level == 4) {
5887 mddev->new_layout = ALGORITHM_PARITY_N;
5888 mddev->new_level = 5;
5889 return setup_conf(mddev);
5891 if (mddev->level == 6)
5892 return raid5_takeover_raid6(mddev);
5897 static void *raid4_takeover(mddev_t *mddev)
5903 if (mddev->level == 0)
5904 return raid45_takeover_raid0(mddev, 4);
5905 if (mddev->level == 5 &&
5906 mddev->layout == ALGORITHM_PARITY_N) {
5907 mddev->new_layout = 0;
5908 mddev->new_level = 4;
5909 return setup_conf(mddev);
5916 static void *raid6_takeover(mddev_t *mddev)
5924 if (mddev->pers != &raid5_personality)
5926 if (mddev->degraded > 1)
5928 if (mddev->raid_disks > 253)
5930 if (mddev->raid_disks < 3)
5933 switch (mddev->layout) {
5955 mddev->new_level = 6;
5956 mddev->new_layout = new_layout;
5957 mddev->delta_disks = 1;
5958 mddev->raid_disks += 1;
5959 return setup_conf(mddev);