Lines Matching refs:mddev (only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/md/)

60 static void unplug_slaves(mddev_t *mddev);
73 if (!r10_bio && conf->mddev)
74 unplug_slaves(conf->mddev);
110 unplug_slaves(conf->mddev);
114 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
193 conf_t *conf = r10_bio->mddev->private;
207 conf_t *conf = r10_bio->mddev->private;
217 mddev_t *mddev = r10_bio->mddev;
218 conf_t *conf = mddev->private;
228 md_wakeup_thread(mddev->thread);
250 conf_t *conf = r10_bio->mddev->private;
261 conf_t *conf = r10_bio->mddev->private;
290 mdname(conf->mddev),
295 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
303 conf_t *conf = r10_bio->mddev->private;
314 md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
338 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
342 md_write_end(r10_bio->mddev);
346 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
464 mddev_t *mddev = q->queuedata;
467 unsigned int chunk_sectors = mddev->chunk_sectors;
509 if (conf->mddev->recovery_cp < MaxSector
596 static void unplug_slaves(mddev_t *mddev)
598 conf_t *conf = mddev->private;
612 rdev_dec_pending(rdev, mddev);
621 mddev_t *mddev = q->queuedata;
624 md_wakeup_thread(mddev->thread);
629 mddev_t *mddev = data;
630 conf_t *conf = mddev->private;
633 if (mddev_congested(mddev, bits))
661 blk_remove_plug(conf->mddev->queue);
665 bitmap_unplug(conf->mddev->bitmap);
708 raid10_unplug(conf->mddev->queue));
717 raid10_unplug(conf->mddev->queue));
738 raid10_unplug(conf->mddev->queue));
775 raid10_unplug(conf->mddev->queue); }));
789 static int make_request(mddev_t *mddev, struct bio * bio)
791 conf_t *conf = mddev->private;
804 md_barrier_request(mddev, bio);
837 if (make_request(mddev, &bp->bio1))
839 if (make_request(mddev, &bp->bio2))
851 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
858 md_write_start(mddev, bio);
872 r10_bio->mddev = mddev;
940 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
943 md_wait_for_blocked_rdev(blocked_rdev, mddev);
973 md_write_end(mddev);
978 bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
981 blk_plug_device(mddev->queue);
988 md_wakeup_thread(mddev->thread);
993 static void status(struct seq_file *seq, mddev_t *mddev)
995 conf_t *conf = mddev->private;
999 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1009 conf->raid_disks - mddev->degraded);
1017 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1020 conf_t *conf = mddev->private;
1029 && conf->raid_disks-mddev->degraded == 1)
1041 mddev->degraded++;
1046 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1049 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1052 mdname(mddev), bdevname(rdev->bdev, b),
1053 mdname(mddev), conf->raid_disks - mddev->degraded);
1066 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1110 static int raid10_spare_active(mddev_t *mddev)
1113 conf_t *conf = mddev->private;
1132 mddev->degraded -= count;
1140 static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1142 conf_t *conf = mddev->private;
1149 if (mddev->recovery_cp < MaxSector)
1169 disk_stack_limits(mddev->gendisk, rdev->bdev,
1178 blk_queue_max_segments(mddev->queue, 1);
1179 blk_queue_segment_boundary(mddev->queue,
1192 md_integrity_add_rdev(rdev, mddev);
1197 static int raid10_remove_disk(mddev_t *mddev, int number)
1199 conf_t *conf = mddev->private;
1228 md_integrity_register(mddev);
1240 conf_t *conf = r10_bio->mddev->private;
1255 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
1256 md_error(r10_bio->mddev,
1263 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1277 mddev_t *mddev = r10_bio->mddev;
1278 conf_t *conf = mddev->private;
1287 md_error(mddev, conf->mirrors[d].rdev);
1291 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1297 md_done_sync(mddev, s, 1);
1323 static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1325 conf_t *conf = mddev->private;
1365 mddev->resync_mismatches += r10_bio->sectors;
1367 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1407 md_done_sync(mddev, r10_bio->sectors, 1);
1423 static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1425 conf_t *conf = mddev->private;
1457 static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
1496 static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
1501 int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
1519 check_decay_read_errors(mddev, rdev);
1528 mdname(mddev),
1532 "device\n", mdname(mddev), b);
1533 md_error(mddev, conf->mirrors[d].rdev);
1561 rdev_dec_pending(rdev, mddev);
1575 md_error(mddev, conf->mirrors[dn].rdev);
1605 mdname(mddev), s,
1611 mdname(mddev),
1613 md_error(mddev, rdev);
1615 rdev_dec_pending(rdev, mddev);
1642 mdname(mddev), s,
1647 mdname(mddev),
1650 md_error(mddev, rdev);
1655 mdname(mddev), s,
1661 rdev_dec_pending(rdev, mddev);
1672 static void raid10d(mddev_t *mddev)
1677 conf_t *conf = mddev->private;
1682 md_check_recovery(mddev);
1699 mddev = r10_bio->mddev;
1700 conf = mddev->private;
1702 sync_request_write(mddev, r10_bio);
1705 recovery_request_write(mddev, r10_bio);
1717 if (mddev->ro == 0) {
1719 fix_read_error(conf, mddev, r10_bio);
1725 mddev->ro ? IO_BLOCKED : NULL;
1730 mdname(mddev),
1742 mdname(mddev),
1760 unplug_slaves(mddev);
1787 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
1809 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1811 conf_t *conf = mddev->private;
1828 max_sector = mddev->dev_sectors;
1829 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1830 max_sector = mddev->resync_max_sectors;
1837 * mddev->curr_resync, but for recovery,
1841 if (mddev->curr_resync < max_sector) { /* aborted */
1842 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1843 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1847 raid10_find_virt(conf, mddev->curr_resync, i);
1848 bitmap_end_sync(mddev->bitmap, sect,
1854 bitmap_close_sync(mddev->bitmap);
1867 if (max_sector > mddev->resync_max)
1868 max_sector = mddev->resync_max; /* Don't do IO beyond here */
1899 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1915 must_sync = bitmap_start_sync(mddev->bitmap, sect,
1935 r10_bio->mddev = mddev;
1951 must_sync = bitmap_start_sync(mddev->bitmap, sect,
1999 &mddev->recovery))
2002 mdname(mddev));
2019 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
2021 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2022 &sync_blocks, mddev->degraded) &&
2023 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2032 r10_bio->mddev = mddev;
2068 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
2138 md_done_sync(mddev, sectors_skipped, 1);
2155 raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
2158 conf_t *conf = mddev->private;
2174 static conf_t *setup_conf(mddev_t *mddev)
2181 if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
2182 !is_power_of_2(mddev->new_chunk_sectors)) {
2185 mdname(mddev), PAGE_SIZE);
2189 nc = mddev->new_layout & 255;
2190 fc = (mddev->new_layout >> 8) & 255;
2191 fo = mddev->new_layout & (1<<16);
2193 if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
2194 (mddev->new_layout >> 17)) {
2196 mdname(mddev), mddev->new_layout);
2205 conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
2215 conf->raid_disks = mddev->raid_disks;
2220 conf->chunk_mask = mddev->new_chunk_sectors - 1;
2221 conf->chunk_shift = ffz(~mddev->new_chunk_sectors);
2228 size = mddev->dev_sectors >> conf->chunk_shift;
2257 conf->thread = md_register_thread(raid10d, mddev, NULL);
2261 conf->mddev = mddev;
2266 mdname(mddev));
2277 static int run(mddev_t *mddev)
2291 if (mddev->private == NULL) {
2292 conf = setup_conf(mddev);
2295 mddev->private = conf;
2297 conf = mddev->private;
2301 mddev->queue->queue_lock = &conf->device_lock;
2303 mddev->thread = conf->thread;
2306 chunk_size = mddev->chunk_sectors << 9;
2307 blk_queue_io_min(mddev->queue, chunk_size);
2309 blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
2311 blk_queue_io_opt(mddev->queue, chunk_size *
2314 list_for_each_entry(rdev, &mddev->disks, same_set) {
2322 disk_stack_limits(mddev->gendisk, rdev->bdev,
2329 blk_queue_max_segments(mddev->queue, 1);
2330 blk_queue_segment_boundary(mddev->queue,
2339 mdname(mddev));
2343 mddev->degraded = 0;
2351 mddev->degraded++;
2357 if (mddev->recovery_cp != MaxSector)
2360 mdname(mddev));
2363 mdname(mddev), conf->raid_disks - mddev->degraded,
2368 mddev->dev_sectors = conf->dev_sectors;
2369 size = raid10_size(mddev, 0, 0);
2370 md_set_array_sectors(mddev, size);
2371 mddev->resync_max_sectors = size;
2373 mddev->queue->unplug_fn = raid10_unplug;
2374 mddev->queue->backing_dev_info.congested_fn = raid10_congested;
2375 mddev->queue->backing_dev_info.congested_data = mddev;
2383 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
2385 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
2386 mddev->queue->backing_dev_info.ra_pages = 2* stripe;
2390 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
2391 md_integrity_register(mddev);
2395 md_unregister_thread(mddev->thread);
2401 mddev->private = NULL;
2406 static int stop(mddev_t *mddev)
2408 conf_t *conf = mddev->private;
2413 md_unregister_thread(mddev->thread);
2414 mddev->thread = NULL;
2415 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2420 mddev->private = NULL;
2424 static void raid10_quiesce(mddev_t *mddev, int state)
2426 conf_t *conf = mddev->private;
2438 static void *raid10_takeover_raid0(mddev_t *mddev)
2443 if (mddev->degraded > 0) {
2445 mdname(mddev));
2450 mddev->new_level = 10;
2452 mddev->new_layout = (1<<8) + 2;
2453 mddev->new_chunk_sectors = mddev->chunk_sectors;
2454 mddev->delta_disks = mddev->raid_disks;
2455 mddev->raid_disks *= 2;
2457 mddev->recovery_cp = MaxSector;
2459 conf = setup_conf(mddev);
2461 list_for_each_entry(rdev, &mddev->disks, same_set)
2468 static void *raid10_takeover(mddev_t *mddev)
2475 if (mddev->level == 0) {
2477 raid0_priv = mddev->private;
2481 mdname(mddev));
2484 return raid10_takeover_raid0(mddev);
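
Judging by the function names (raid10d, sync_request, raid10_takeover_raid0), these matches appear to come from raid10.c in this tree. The listing itself traces the usual md personality wiring: setup_conf() stores the back-pointer "conf->mddev = mddev", run() stores "mddev->private = conf", each request records "r10_bio->mddev = mddev", and the completion handlers recover the conf via "conf_t *conf = r10_bio->mddev->private". The sketch below is a minimal userspace illustration of that pointer wiring only; the struct and field names are simplified stand-ins, not the real kernel mddev_t/conf_t/r10bio_t definitions.

/*
 * Minimal userspace sketch (not kernel code) of the back-pointer pattern
 * visible in the matches above. All types here are hypothetical stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

struct conf;                          /* forward declaration */

struct mddev {                        /* stand-in for mddev_t */
    struct conf *private;             /* set in run(): mddev->private = conf */
    int raid_disks;
    int degraded;
};

struct conf {                         /* stand-in for conf_t */
    struct mddev *mddev;              /* set in setup_conf(): conf->mddev = mddev */
    int raid_disks;
};

struct r10_bio {                      /* stand-in for r10bio_t */
    struct mddev *mddev;              /* set in make_request()/sync_request() */
    long sector;
};

/* Mirrors the pairing done by setup_conf()/run() in the listing above. */
static struct conf *setup_conf(struct mddev *mddev)
{
    struct conf *conf = calloc(1, sizeof(*conf));
    if (!conf)
        return NULL;
    conf->raid_disks = mddev->raid_disks;
    conf->mddev = mddev;              /* cf. "conf->mddev = mddev" */
    return conf;
}

int main(void)
{
    struct mddev md = { .raid_disks = 4 };
    struct conf *conf = setup_conf(&md);
    if (!conf)
        return 1;
    md.private = conf;                /* cf. "mddev->private = conf" */

    struct r10_bio r10 = { .mddev = &md, .sector = 0 };

    /* A completion path can reach the conf from the bio, as in
     * "conf_t *conf = r10_bio->mddev->private;" in the end_io handlers. */
    struct conf *from_bio = r10.mddev->private;
    printf("raid_disks seen from bio: %d\n", from_bio->raid_disks);

    free(conf);
    return 0;
}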