• Home
  • History
  • Annotate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/md/

Lines Matching refs:mddev

55 static void unplug_slaves(mddev_t *mddev);
68 if (!r1_bio && pi->mddev)
69 unplug_slaves(pi->mddev);
95 unplug_slaves(pi->mddev);
114 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
130 if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
186 conf_t *conf = r1_bio->mddev->private;
200 conf_t *conf = r1_bio->mddev->private;
206 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
217 mddev_t *mddev = r1_bio->mddev;
218 conf_t *conf = mddev->private;
226 md_wakeup_thread(mddev->thread);
257 conf_t *conf = r1_bio->mddev->private;
268 conf_t *conf = r1_bio->mddev->private;
285 if (r1_bio->mddev->degraded == conf->raid_disks ||
286 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
301 mdname(conf->mddev),
306 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
314 conf_t *conf = r1_bio->mddev->private;
325 r1_bio->mddev->barriers_work = 0;
334 md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
373 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
392 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
396 md_write_end(r1_bio->mddev);
436 if (conf->mddev->recovery_cp < MaxSector &&
533 rdev_dec_pending(rdev, conf->mddev);
544 static void unplug_slaves(mddev_t *mddev)
546 conf_t *conf = mddev->private;
550 for (i=0; i<mddev->raid_disks; i++) {
560 rdev_dec_pending(rdev, mddev);
569 mddev_t *mddev = q->queuedata;
571 unplug_slaves(mddev);
572 md_wakeup_thread(mddev->thread);
577 mddev_t *mddev = data;
578 conf_t *conf = mddev->private;
581 if (mddev_congested(mddev, bits))
585 for (i = 0; i < mddev->raid_disks; i++) {
617 blk_remove_plug(conf->mddev->queue);
621 bitmap_unplug(conf->mddev->bitmap);
665 raid1_unplug(conf->mddev->queue));
674 raid1_unplug(conf->mddev->queue));
696 raid1_unplug(conf->mddev->queue));
733 raid1_unplug(conf->mddev->queue); }));
778 static int make_request(mddev_t *mddev, struct bio * bio)
780 conf_t *conf = mddev->private;
803 md_write_start(mddev, bio); /* wait on superblock update early */
806 bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
807 bio->bi_sector < mddev->suspend_hi) {
817 if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
818 bio->bi_sector >= mddev->suspend_hi)
824 if (unlikely(!mddev->barriers_work &&
827 md_write_end(mddev);
834 bitmap = mddev->bitmap;
846 r1_bio->mddev = mddev;
908 rdev_dec_pending(rdev, mddev);
925 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
928 md_wait_for_blocked_rdev(blocked_rdev, mddev);
947 < mddev->bitmap_info.max_write_behind) &&
1003 blk_plug_device(mddev->queue);
1010 md_wakeup_thread(mddev->thread);
1015 static void status(struct seq_file *seq, mddev_t *mddev)
1017 conf_t *conf = mddev->private;
1021 conf->raid_disks - mddev->degraded);
1033 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1036 conf_t *conf = mddev->private;
1045 && (conf->raid_disks - mddev->degraded) == 1) {
1052 mddev->recovery_disabled = 1;
1058 mddev->degraded++;
1064 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1067 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1070 mdname(mddev), bdevname(rdev->bdev, b),
1071 mdname(mddev), conf->raid_disks - mddev->degraded);
1083 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1108 static int raid1_spare_active(mddev_t *mddev)
1111 conf_t *conf = mddev->private;
1118 * Called under mddev lock, so rcu protection not needed.
1130 mddev->degraded -= count;
1138 static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1140 conf_t *conf = mddev->private;
1145 int last = mddev->raid_disks - 1;
1153 disk_stack_limits(mddev->gendisk, rdev->bdev,
1162 blk_queue_max_segments(mddev->queue, 1);
1163 blk_queue_segment_boundary(mddev->queue,
1178 md_integrity_add_rdev(rdev, mddev);
1183 static int raid1_remove_disk(mddev_t *mddev, int number)
1185 conf_t *conf = mddev->private;
1202 !mddev->recovery_disabled &&
1203 mddev->degraded < conf->raid_disks) {
1215 md_integrity_register(mddev);
1229 for (i=r1_bio->mddev->raid_disks; i--; )
1250 mddev_t *mddev = r1_bio->mddev;
1251 conf_t *conf = mddev->private;
1266 bitmap_end_sync(mddev->bitmap, s,
1271 md_error(mddev, conf->mirrors[mirror].rdev);
1279 md_done_sync(mddev, s, uptodate);
1283 static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
1285 conf_t *conf = mddev->private;
1293 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1303 for (i=0; i<mddev->raid_disks; i++)
1305 md_error(mddev, conf->mirrors[i].rdev);
1307 md_done_sync(mddev, r1_bio->sectors, 1);
1311 for (primary=0; primary<mddev->raid_disks; primary++)
1315 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
1319 for (i=0; i<mddev->raid_disks; i++)
1339 mddev->resync_mismatches += r1_bio->sectors;
1340 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
1343 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
1435 md_error(mddev, rdev);
1450 md_error(mddev, rdev);
1455 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
1458 mdname(mddev),
1461 md_done_sync(mddev, r1_bio->sectors, 0);
1480 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
1493 md_done_sync(mddev, r1_bio->sectors, 1);
1509 mddev_t *mddev = conf->mddev;
1543 md_error(mddev, conf->mirrors[read_disk].rdev);
1560 md_error(mddev, rdev);
1577 md_error(mddev, rdev);
1583 mdname(mddev), s,
1595 static void raid1d(mddev_t *mddev)
1600 conf_t *conf = mddev->private;
1605 md_check_recovery(mddev);
1622 mddev = r1_bio->mddev;
1623 conf = mddev->private;
1625 sync_request_write(mddev, r1_bio);
1673 if (mddev->ro == 0) {
1680 md_error(mddev,
1687 mdname(mddev),
1694 mddev->ro ? IO_BLOCKED : NULL;
1703 mdname(mddev),
1718 unplug_slaves(mddev);
1746 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1748 conf_t *conf = mddev->private;
1763 max_sector = mddev->dev_sectors;
1768 * We can find the current address in mddev->curr_resync
1770 if (mddev->curr_resync < max_sector) /* aborted */
1771 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1776 bitmap_close_sync(mddev->bitmap);
1781 if (mddev->bitmap == NULL &&
1782 mddev->recovery_cp == MaxSector &&
1783 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
1791 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
1792 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1805 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
1821 r1_bio->mddev = mddev;
1875 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
1889 if (max_sector > mddev->resync_max)
1890 max_sector = mddev->resync_max; /* Don't do IO beyond here */
1901 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
1904 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
1942 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1961 static sector_t raid1_size(mddev_t *mddev, sector_t sectors, int raid_disks)
1966 return mddev->dev_sectors;
1969 static conf_t *setup_conf(mddev_t *mddev)
1981 conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
1993 conf->poolinfo->raid_disks = mddev->raid_disks;
2000 conf->poolinfo->mddev = mddev;
2003 list_for_each_entry(rdev, &mddev->disks, same_set) {
2005 if (disk_idx >= mddev->raid_disks
2014 conf->raid_disks = mddev->raid_disks;
2015 conf->mddev = mddev;
2045 mdname(mddev));
2049 conf->thread = md_register_thread(raid1d, mddev, NULL);
2053 mdname(mddev));
2071 static int run(mddev_t *mddev)
2077 if (mddev->level != 1) {
2079 mdname(mddev), mddev->level);
2082 if (mddev->reshape_position != MaxSector) {
2084 mdname(mddev));
2092 if (mddev->private == NULL)
2093 conf = setup_conf(mddev);
2095 conf = mddev->private;
2100 mddev->queue->queue_lock = &conf->device_lock;
2101 list_for_each_entry(rdev, &mddev->disks, same_set) {
2102 disk_stack_limits(mddev->gendisk, rdev->bdev,
2109 blk_queue_max_segments(mddev->queue, 1);
2110 blk_queue_segment_boundary(mddev->queue,
2115 mddev->degraded = 0;
2120 mddev->degraded++;
2122 if (conf->raid_disks - mddev->degraded == 1)
2123 mddev->recovery_cp = MaxSector;
2125 if (mddev->recovery_cp != MaxSector)
2128 mdname(mddev));
2131 mdname(mddev), mddev->raid_disks - mddev->degraded,
2132 mddev->raid_disks);
2137 mddev->thread = conf->thread;
2139 mddev->private = conf;
2141 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
2143 mddev->queue->unplug_fn = raid1_unplug;
2144 mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2145 mddev->queue->backing_dev_info.congested_data = mddev;
2146 md_integrity_register(mddev);
2150 static int stop(mddev_t *mddev)
2152 conf_t *conf = mddev->private;
2153 struct bitmap *bitmap = mddev->bitmap;
2158 mdname(mddev));
2167 md_unregister_thread(mddev->thread);
2168 mddev->thread = NULL;
2169 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2175 mddev->private = NULL;
2179 static int raid1_resize(mddev_t *mddev, sector_t sectors)
2188 md_set_array_sectors(mddev, raid1_size(mddev, sectors, 0));
2189 if (mddev->array_sectors > raid1_size(mddev, sectors, 0))
2191 set_capacity(mddev->gendisk, mddev->array_sectors);
2192 revalidate_disk(mddev->gendisk);
2193 if (sectors > mddev->dev_sectors &&
2194 mddev->recovery_cp == MaxSector) {
2195 mddev->recovery_cp = mddev->dev_sectors;
2196 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2198 mddev->dev_sectors = sectors;
2199 mddev->resync_max_sectors = sectors;
2203 static int raid1_reshape(mddev_t *mddev)
2219 conf_t *conf = mddev->private;
2225 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
2226 mddev->layout != mddev->new_layout ||
2227 mddev->level != mddev->new_level) {
2228 mddev->new_chunk_sectors = mddev->chunk_sectors;
2229 mddev->new_layout = mddev->layout;
2230 mddev->new_level = mddev->level;
2234 err = md_allow_write(mddev);
2238 raid_disks = mddev->raid_disks + mddev->delta_disks;
2252 newpoolinfo->mddev = mddev;
2279 sysfs_remove_link(&mddev->kobj, nm);
2282 sysfs_remove_link(&mddev->kobj, nm);
2283 if (sysfs_create_link(&mddev->kobj,
2288 mdname(mddev), nm);
2299 mddev->degraded += (raid_disks - conf->raid_disks);
2301 conf->raid_disks = mddev->raid_disks = raid_disks;
2302 mddev->delta_disks = 0;
2307 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2308 md_wakeup_thread(mddev->thread);
2314 static void raid1_quiesce(mddev_t *mddev, int state)
2316 conf_t *conf = mddev->private;
2331 static void *raid1_takeover(mddev_t *mddev)
2336 if (mddev->level == 5 && mddev->raid_disks == 2) {
2338 mddev->new_level = 1;
2339 mddev->new_layout = 0;
2340 mddev->new_chunk_sectors = 0;
2341 conf = setup_conf(mddev);