Directory: /netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/drivers/md/

Lines Matching refs:mddev

117 blk_plug_device(conf->mddev->queue);
121 blk_plug_device(conf->mddev->queue);
126 md_wakeup_thread(conf->mddev->thread);
131 md_wakeup_thread(conf->mddev->thread);
138 md_wakeup_thread(conf->mddev->thread);
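
The cluster at source lines 117-138 is raid5's recurring "defer and kick" idiom: when a stripe cannot be serviced immediately, the code plugs the array's request queue so the block layer batches I/O, then wakes the per-array raid5d thread to handle it later. The sketch below uses stand-in types and print statements, not the kernel API, purely to illustrate the call ordering:

    #include <stdio.h>

    /* Toy stand-ins: the real blk_plug_device() takes a request_queue
     * and md_wakeup_thread() an mdk_thread_t. */
    struct queue  { int plugged; };
    struct thread { const char *name; };

    static void blk_plug_device(struct queue *q)   { q->plugged = 1; }
    static void md_wakeup_thread(struct thread *t) { printf("wake %s\n", t->name); }

    int main(void)
    {
        struct queue q = { 0 };
        struct thread raid5d = { "raid5d" };

        blk_plug_device(&q);       /* batch: don't dispatch yet       */
        md_wakeup_thread(&raid5d); /* the daemon will service stripes */
        return 0;
    }
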
271 static void unplug_slaves(mddev_t *mddev);
301 raid5_unplug_device(conf->mddev->queue)
356 sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev));
357 sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev));
408 md_allow_write(conf->mddev);
448 unplug_slaves(conf->mddev)
553 mdname(conf->mddev), STRIPE_SECTORS,
568 if (conf->mddev->degraded)
570 mdname(conf->mddev),
576 mdname(conf->mddev),
583 mdname(conf->mddev), bdn);
591 md_error(conf->mddev, rdev);
594 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
625 md_error(conf->mddev, conf->disks[i].rdev);
627 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
657 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
660 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
664 set_bit(MD_CHANGE_DEVS, &mddev->flags);
668 mddev->degraded++;
673 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
679 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
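
error() at source line 657 is raid5's md failure hook: it increments mddev->degraded, raises MD_CHANGE_DEVS so the superblocks are rewritten, and (when redundancy is exhausted) sets MD_RECOVERY_ERR. A minimal userspace model of that state change, with made-up bit positions (the kernel uses set_bit() on mddev->flags and mddev->recovery):

    #include <stdio.h>

    #define MD_CHANGE_DEVS  (1u << 0)   /* assumed values, not the kernel's */
    #define MD_RECOVERY_ERR (1u << 1)

    struct array_state {
        int raid_disks, degraded;
        unsigned flags, recovery;
    };

    static void fail_member(struct array_state *a)
    {
        a->degraded++;                  /* cf. line 668 */
        a->flags    |= MD_CHANGE_DEVS;  /* cf. line 664 */
        a->recovery |= MD_RECOVERY_ERR; /* cf. line 673 */
    }

    int main(void)
    {
        struct array_state a = { 4, 0, 0, 0 };
        fail_member(&a);
        /* same operands as the message printed at line 679 */
        printf("operational: %d/%d\n", a.raid_disks - a.degraded, a.raid_disks);
        return 0;
    }
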
1278 if (conf->mddev->bitmap && firstwrite) {
1279 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
1447 md_error(conf->mddev, rdev);
1464 md_write_end(conf->mddev);
1478 md_write_end(conf->mddev);
1505 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
1510 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
1543 md_write_end(conf->mddev);
1553 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
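
Source lines 1278-1553 show how raid5 brackets stripe writes with the write-intent bitmap: bitmap_startwrite() marks the region dirty before the first write to a stripe is issued, and bitmap_endwrite() clears it once all writes complete, so a crash leaves only genuinely in-flight regions to resync. A toy illustration of that bracketing with stub functions (not the kernel bitmap API; STRIPE_SECTORS is 8 on 4 KiB pages):

    #include <stdio.h>

    struct bitmap { const char *name; };

    static void bitmap_startwrite(struct bitmap *b, unsigned long long sector, int n)
    {
        printf("%s: mark %llu..%llu dirty\n", b->name, sector, sector + n - 1);
    }

    static void bitmap_endwrite(struct bitmap *b, unsigned long long sector, int n)
    {
        printf("%s: clear %llu..%llu\n", b->name, sector, sector + n - 1);
    }

    int main(void)
    {
        struct bitmap bm = { "write-intent" };
        bitmap_startwrite(&bm, 1024, 8);  /* before issuing, cf. line 1279 */
        /* ... stripe I/O completes ... */
        bitmap_endwrite(&bm, 1024, 8);    /* cf. lines 1505, 1553          */
        return 0;
    }
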
1606 /* && !(!mddev->insync && i == sh->pd_idx) */
1679 md_wakeup_thread(conf->mddev->thread);
1699 conf->mddev->resync_mismatches += STRIPE_SECTORS;
1700 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
1725 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
1732 if (failed == 1 && ! conf->mddev->ro &&
1766 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
1989 md_error(conf->mddev, rdev);
2006 md_write_end(conf->mddev);
2020 md_write_end(conf->mddev);
2047 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2052 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
2096 md_write_end(conf->mddev);
2106 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2238 md_wakeup_thread(conf->mddev->thread);
2293 conf->mddev->resync_mismatches += STRIPE_SECTORS;
2294 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2335 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
2342 if (failed <= 2 && ! conf->mddev->ro)
2377 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
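
The RAID-5 (source lines 1699-1766) and RAID-6 (2293-2377) parity-check paths apply the same policy on a mismatch: bump mddev->resync_mismatches by one stripe's worth of sectors, and rewrite parity only when the pass is a "repair" rather than a "check" (MD_RECOVERY_CHECK set). A condensed, hypothetical version of that branch:

    #include <stdio.h>

    static unsigned long resync_mismatches;  /* surfaces as mismatch_cnt */

    static void parity_mismatch(int check_only)
    {
        resync_mismatches += 8;   /* STRIPE_SECTORS on 4 KiB pages */
        if (check_only)
            return;               /* "check": count, don't touch  */
        printf("repair: recomputing parity block\n");
    }

    int main(void)
    {
        parity_mismatch(1);       /* echo check  > sync_action */
        parity_mismatch(0);       /* echo repair > sync_action */
        printf("mismatch_cnt = %lu\n", resync_mismatches);
        return 0;
    }
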
2545 static void unplug_slaves(mddev_t *mddev)
2547 raid5_conf_t *conf = mddev_to_conf(mddev);
2551 for (i=0; i<mddev->raid_disks; i++) {
2562 rdev_dec_pending(rdev, mddev);
2571 mddev_t *mddev = q->queuedata;
2572 raid5_conf_t *conf = mddev_to_conf(mddev);
2581 md_wakeup_thread(mddev->thread);
2585 unplug_slaves(mddev);
2591 mddev_t *mddev = q->queuedata;
2592 raid5_conf_t *conf = mddev_to_conf(mddev);
2596 for (i=0; i<mddev->raid_disks && ret == 0; i++) {
2609 rdev_dec_pending(rdev, mddev);
2620 mddev_t *mddev = data;
2621 raid5_conf_t *conf = mddev_to_conf(mddev);
2641 mddev_t *mddev = q->queuedata;
2644 unsigned int chunk_sectors = mddev->chunk_size >> 9;
2659 static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
2662 unsigned int chunk_sectors = mddev->chunk_size >> 9;
2683 md_wakeup_thread(conf->mddev->thread);
2717 mddev_t *mddev;
2726 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
2727 conf = mddev_to_conf(mddev);
2731 rdev_dec_pending(rdev, conf->mddev);
2770 mddev_t *mddev = q->queuedata;
2771 raid5_conf_t *conf = mddev_to_conf(mddev);
2778 if (!in_chunk_boundary(mddev, raid_bio)) {
2817 rdev_dec_pending(rdev, mddev);
2840 mddev_t *mddev = q->queuedata;
2841 raid5_conf_t *conf = mddev_to_conf(mddev);
2854 md_write_start(mddev, bi);
2856 disk_stat_inc(mddev->gendisk, ios[rw]);
2857 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
2860 mddev->reshape_position == MaxSector &&
2930 if (logical_sector >= mddev->suspend_lo &&
2931 logical_sector < mddev->suspend_hi) {
2943 raid5_unplug_device(mddev->queue);
2966 md_write_end(mddev);
2975 static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
2986 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
3026 mddev->reshape_position = conf->expand_progress;
3027 set_bit(MD_CHANGE_DEVS, &mddev->flags);
3028 md_wakeup_thread(mddev->thread);
3029 wait_event(mddev->sb_wait, mddev->flags == 0 ||
3032 conf->expand_lo = mddev->reshape_position;
3056 if (s < (mddev->array_size<<1)) {
3087 if (last_sector >= (mddev->size<<1))
3088 last_sector = (mddev->size<<1)-1;
3102 static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
3104 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
3108 sector_t max_sector = mddev->size << 1;
3115 unplug_slaves(mddev);
3116 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
3121 if (mddev->curr_resync < max_sector) /* aborted */
3122 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
3126 bitmap_close_sync(mddev->bitmap);
3131 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3132 return reshape_request(mddev, sector_nr, skipped);
3138 if (mddev->degraded >= conf->max_degraded &&
3139 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3140 sector_t rv = (mddev->size << 1) - sector_nr;
3144 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
3145 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
3166 for (i=0; i<mddev->raid_disks; i++)
3170 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
3266 static void raid5d (mddev_t *mddev)
3269 raid5_conf_t *conf = mddev_to_conf(mddev);
3274 md_check_recovery(mddev);
3285 bitmap_unplug(mddev->bitmap);
3293 !blk_queue_plugged(mddev->queue) &&
3328 unplug_slaves(mddev);
3334 raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
3336 raid5_conf_t *conf = mddev_to_conf(mddev);
3344 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
3346 raid5_conf_t *conf = mddev_to_conf(mddev);
3365 md_allow_write(mddev);
3380 stripe_cache_active_show(mddev_t *mddev, char *page)
3382 raid5_conf_t *conf = mddev_to_conf(mddev);
3402 static int run(mddev_t *mddev)
3411 if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) {
3413 mdname(mddev), mddev->level);
3417 if (mddev->reshape_position != MaxSector) {
3425 int max_degraded = (mddev->level == 5 ? 1 : 2);
3427 if (mddev->new_level != mddev->level ||
3428 mddev->new_layout != mddev->layout ||
3429 mddev->new_chunk != mddev->chunk_size) {
3432 mdname(mddev));
3435 if (mddev->delta_disks <= 0) {
3438 mdname(mddev));
3441 old_disks = mddev->raid_disks - mddev->delta_disks;
3446 here_new = mddev->reshape_position;
3447 if (sector_div(here_new, (mddev->chunk_size>>9)*
3448 (mddev->raid_disks - max_degraded))) {
3454 here_old = mddev->reshape_position;
3455 sector_div(here_old, (mddev->chunk_size>>9)*
3470 mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL);
3471 if ((conf = mddev->private) == NULL)
3473 if (mddev->reshape_position == MaxSector) {
3474 conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks;
3476 conf->raid_disks = mddev->raid_disks;
3477 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
3485 conf->mddev = mddev;
3490 if (mddev->level == 6) {
3506 PRINTK("raid5: run(%s) called.\n", mdname(mddev));
3508 ITERATE_RDEV(mddev,rdev,tmp) {
3529 mddev->degraded = conf->raid_disks - working_disks;
3530 conf->mddev = mddev;
3531 conf->chunk_size = mddev->chunk_size;
3532 conf->level = mddev->level;
3537 conf->algorithm = mddev->layout;
3539 conf->expand_progress = mddev->reshape_position;
3542 mddev->size &= ~(mddev->chunk_size/1024 -1);
3543 mddev->resync_max_sectors = mddev->size << 1;
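
Source lines 3542-3543 encode md's unit conventions: mddev->size is the per-device size in KiB while chunk_size is in bytes, so "chunk_size/1024 - 1" forms a KiB mask that rounds the device down to a whole number of chunks, and "size << 1" converts KiB to 512-byte sectors. A worked example with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long size_kib = 976762584ULL;  /* ~931 GiB component */
        unsigned int chunk_bytes = 64 * 1024;

        size_kib &= ~(unsigned long long)(chunk_bytes / 1024 - 1);
        printf("size       = %llu KiB\n", size_kib);  /* now a chunk multiple */
        printf("resync_max = %llu sectors\n", size_kib << 1);
        return 0;
    }
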
3547 mdname(mddev), conf->raid_disks);
3552 conf->chunk_size, mdname(mddev));
3558 conf->algorithm, mdname(mddev));
3561 if (mddev->degraded > conf->max_degraded) {
3564 mdname(mddev), mddev->degraded, conf->raid_disks);
3568 if (mddev->degraded > 0 &&
3569 mddev->recovery_cp != MaxSector) {
3570 if (mddev->ok_start_degraded)
3574 mdname(mddev));
3578 mdname(mddev));
3584 mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5");
3585 if (!mddev->thread) {
3588 mdname(mddev));
3598 md_unregister_thread(mddev->thread);
3602 memory, mdname(mddev));
3604 if (mddev->degraded == 0)
3606 " devices, algorithm %d\n", conf->level, mdname(mddev),
3607 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
3612 mdname(mddev), mddev->raid_disks - mddev->degraded,
3613 mddev->raid_disks, conf->algorithm);
3621 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3622 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3623 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
3624 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3625 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3635 (mddev->chunk_size / PAGE_SIZE);
3636 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
3637 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
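
Source lines 3634-3637 size the device's read-ahead to cover two full stripes: "stripe" is roughly the number of pages per data stripe (data disks times chunk pages), and ra_pages is raised to 2 * stripe if it was smaller. A sketch of the arithmetic with assumed values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int page_size = 4096, chunk_bytes = 64 * 1024;
        int data_disks = 3;              /* 4-disk RAID-5       */
        unsigned long ra_pages = 32;     /* assumed prior value */

        unsigned long stripe = data_disks * (chunk_bytes / page_size);
        if (ra_pages < 2 * stripe)
            ra_pages = 2 * stripe;
        printf("stripe = %lu pages, ra_pages = %lu (%lu KiB)\n",
               stripe, ra_pages, ra_pages * page_size / 1024);
        return 0;
    }
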
3641 if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
3644 mdname(mddev));
3646 mddev->queue->unplug_fn = raid5_unplug_device;
3647 mddev->queue->issue_flush_fn = raid5_issue_flush;
3648 mddev->queue->backing_dev_info.congested_data = mddev;
3649 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
3651 mddev->array_size = mddev->size * (conf->previous_raid_disks -
3654 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
3665 mddev->private = NULL;
3666 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
3672 static int stop(mddev_t *mddev)
3674 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
3676 md_unregister_thread(mddev->thread);
3677 mddev->thread = NULL;
3680 mddev->queue->backing_dev_info.congested_fn = NULL;
3681 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
3682 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
3685 mddev->private = NULL;
3724 static void status (struct seq_file *seq, mddev_t *mddev)
3726 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
3729 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
3730 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
3753 conf->raid_disks - conf->mddev->degraded);
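
status() at source line 3724 renders the raid5 portion of /proc/mdstat; the two seq_printf() calls at 3729-3730 produce output like the following (reproduced here with plain printf and sample values, the trailing per-disk U/_ flags being emitted by a loop later in the function):

    #include <stdio.h>

    int main(void)
    {
        int level = 5, chunk_size = 65536, layout = 2;  /* left-symmetric */
        int raid_disks = 4, degraded = 1;

        printf(" level %d, %dk chunk, algorithm %d",
               level, chunk_size >> 10, layout);               /* cf. line 3729 */
        printf(" [%d/%d] [", raid_disks, raid_disks - degraded); /* line 3730   */
        printf("UUU_]\n");  /* sample flags: three up, one failed */
        return 0;
    }
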
3765 static int raid5_spare_active(mddev_t *mddev)
3768 raid5_conf_t *conf = mddev->private;
3778 mddev->degraded--;
3786 static int raid5_remove_disk(mddev_t *mddev, int number)
3788 raid5_conf_t *conf = mddev->private;
3815 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
3817 raid5_conf_t *conf = mddev->private;
3822 if (mddev->degraded > conf->max_degraded)
3849 static int raid5_resize(mddev_t *mddev, sector_t sectors)
3858 raid5_conf_t *conf = mddev_to_conf(mddev);
3860 sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
3861 mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1;
3862 set_capacity(mddev->gendisk, mddev->array_size << 1);
3863 mddev->changed = 1;
3864 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
3865 mddev->recovery_cp = mddev->size << 1;
3866 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3868 mddev->size = sectors /2;
3869 mddev->resync_max_sectors = sectors;
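
raid5_resize() (source lines 3849-3869) redoes the capacity arithmetic when components grow: round the new per-device sector count down to a chunk multiple, multiply by the data-disk count, and store the result in KiB (">> 1"); set_capacity() then re-exports it in sectors ("<< 1"). A worked example with made-up values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long sectors = 1953525168ULL;  /* new component size */
        unsigned int chunk_bytes = 64 * 1024;
        int raid_disks = 4, max_degraded = 1;

        sectors &= ~(unsigned long long)(chunk_bytes / 512 - 1); /* line 3860 */
        unsigned long long array_kib =
            (sectors * (raid_disks - max_degraded)) >> 1;        /* line 3861 */
        printf("array_size = %llu KiB (capacity %llu sectors)\n",
               array_kib, array_kib << 1);
        return 0;
    }
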
3874 static int raid5_check_reshape(mddev_t *mddev)
3876 raid5_conf_t *conf = mddev_to_conf(mddev);
3879 if (mddev->delta_disks < 0 ||
3880 mddev->new_level != mddev->level)
3882 if (mddev->delta_disks == 0)
3893 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
3894 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
3896 (mddev->chunk_size / STRIPE_SIZE)*4);
3900 err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
3904 if (mddev->degraded > conf->max_degraded)
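
raid5_check_reshape() at source lines 3893-3896 refuses to reshape unless the stripe cache can hold four chunks' worth of stripes at both the old and the new chunk size (STRIPE_SIZE is one page). The gate, reduced to its arithmetic with assumed values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int stripe_size = 4096;             /* STRIPE_SIZE        */
        unsigned int chunk = 64 * 1024, new_chunk = 128 * 1024;
        int max_nr_stripes = 256;                    /* assumed cache size */

        int need_old = (chunk     / stripe_size) * 4;
        int need_new = (new_chunk / stripe_size) * 4;
        if (need_old > max_nr_stripes || need_new > max_nr_stripes)
            printf("grow stripe_cache_size to at least %d\n",
                   need_old > need_new ? need_old : need_new);
        else
            printf("stripe cache is large enough\n");
        return 0;
    }
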
3910 static int raid5_start_reshape(mddev_t *mddev)
3912 raid5_conf_t *conf = mddev_to_conf(mddev);
3919 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3922 ITERATE_RDEV(mddev, rdev, rtmp)
3927 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
3936 conf->raid_disks += mddev->delta_disks;
3944 ITERATE_RDEV(mddev, rdev, rtmp)
3947 if (raid5_add_disk(mddev, rdev)) {
3953 if (sysfs_create_link(&mddev->kobj,
3958 nm, mdname(mddev));
3964 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices;
3966 mddev->raid_disks = conf->raid_disks;
3967 mddev->reshape_position = 0;
3968 set_bit(MD_CHANGE_DEVS, &mddev->flags);
3970 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3971 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3972 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
3973 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3974 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3976 if (!mddev->sync_thread) {
3977 mddev->recovery = 0;
3979 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
3984 md_wakeup_thread(mddev->sync_thread);
3985 md_new_event(mddev);
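
raid5_start_reshape() (source lines 3910-3985) activates the spares, grows conf->raid_disks by delta_disks (line 3936), and at line 3964 recomputes degraded as the number of new slots not actually backed by an added device. The accounting, with assumed numbers:

    #include <stdio.h>

    int main(void)
    {
        int previous_raid_disks = 3, delta_disks = 2, added_devices = 2;
        int raid_disks = previous_raid_disks + delta_disks;   /* line 3936 */

        int degraded = (raid_disks - previous_raid_disks) - added_devices;
        printf("reshape %d -> %d disks, degraded = %d\n",
               previous_raid_disks, raid_disks, degraded);    /* line 3964 */
        return 0;
    }
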
3994 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
3995 conf->mddev->array_size = conf->mddev->size *
3997 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1);
3998 conf->mddev->changed = 1;
4000 bdev = bdget_disk(conf->mddev->gendisk, 0);
4003 i_size_write(bdev->bd_inode, (loff_t)conf->mddev->array_size << 10);
4010 conf->mddev->reshape_position = MaxSector;
4018 (conf->mddev->chunk_size / PAGE_SIZE);
4019 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4020 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
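
The end-of-reshape path (source lines 3994-4020) republishes the array size in three unit systems: mddev->array_size in KiB, set_capacity() in 512-byte sectors ("<< 1"), and i_size_write() in bytes ("<< 10"). A worked conversion with made-up values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long size_kib = 976762560ULL;  /* per-device KiB */
        int raid_disks = 5, max_degraded = 1;        /* after reshape  */

        unsigned long long array_kib = size_kib * (raid_disks - max_degraded);
        printf("array_size = %llu KiB\n",     array_kib);        /* line 3995 */
        printf("capacity   = %llu sectors\n", array_kib << 1);   /* line 3997 */
        printf("inode size = %llu bytes\n",   array_kib << 10);  /* line 4003 */
        return 0;
    }
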
4025 static void raid5_quiesce(mddev_t *mddev, int state)
4027 raid5_conf_t *conf = mddev_to_conf(mddev);