/*
 * multipath.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * MULTIPATH management functions.
 *
 * derived from raid1.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/blkdev.h>
#include <linux/raid/md_u.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "md.h"
#include "multipath.h"

#define MAX_WORK_PER_DISK 128

#define NR_RESERVED_BUFS 32


static int multipath_map (multipath_conf_t *conf)
{
	int i, disks = conf->raid_disks;

	/*
	 * Later we do read balancing on the read side
	 * now we use the first available disk.
	 */

	rcu_read_lock();
	for (i = 0; i < disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && test_bit(In_sync, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return i;
		}
	}
	rcu_read_unlock();

	printk(KERN_ERR "multipath_map(): no more operational IO paths?\n");
	return (-1);
}

static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
{
	unsigned long flags;
	mddev_t *mddev = mp_bh->mddev;
	multipath_conf_t *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&mp_bh->retry_list, &conf->retry_list);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(mddev->thread);
}


/*
 * multipath_end_bh_io() is called when we have finished servicing a multipathed
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
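 * 'err' is handed straight to bio_endio(): 0 on success, or a negative
 * errno (e.g. -EIO) when the request ultimately fails.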
 */
static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
{
	struct bio *bio = mp_bh->master_bio;
	multipath_conf_t *conf = mp_bh->mddev->private;

	bio_endio(bio, err);
	mempool_free(mp_bh, conf->pool);
}

static void multipath_end_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct multipath_bh *mp_bh = bio->bi_private;
	multipath_conf_t *conf = mp_bh->mddev->private;
	mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;

	if (uptodate)
		multipath_end_bh_io(mp_bh, 0);
	else if (!(bio->bi_rw & REQ_RAHEAD)) {
		/*
		 * oops, IO error:
		 */
		char b[BDEVNAME_SIZE];
		md_error (mp_bh->mddev, rdev);
		printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
		       bdevname(rdev->bdev,b),
		       (unsigned long long)bio->bi_sector);
		multipath_reschedule_retry(mp_bh);
	} else
		multipath_end_bh_io(mp_bh, error);
	rdev_dec_pending(rdev, conf->mddev);
}

static void unplug_slaves(mddev_t *mddev)
{
	multipath_conf_t *conf = mddev->private;
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)
		    && atomic_read(&rdev->nr_pending)) {
			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			blk_unplug(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

static void multipath_unplug(struct request_queue *q)
{
	unplug_slaves(q->queuedata);
}


static int multipath_make_request(mddev_t *mddev, struct bio * bio)
{
	multipath_conf_t *conf = mddev->private;
	struct multipath_bh * mp_bh;
	struct multipath_info *multipath;

	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
		md_barrier_request(mddev, bio);
		return 0;
	}

	mp_bh = mempool_alloc(conf->pool, GFP_NOIO);

	mp_bh->master_bio = bio;
	mp_bh->mddev = mddev;

	mp_bh->path = multipath_map(conf);
	if (mp_bh->path < 0) {
		bio_endio(bio, -EIO);
		mempool_free(mp_bh, conf->pool);
		return 0;
	}
	multipath = conf->multipaths + mp_bh->path;

	mp_bh->bio = *bio;
	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
	mp_bh->bio.bi_end_io = multipath_end_request;
	mp_bh->bio.bi_private = mp_bh;
	generic_make_request(&mp_bh->bio);
	return 0;
}

static void multipath_status (struct seq_file *seq, mddev_t *mddev)
{
	multipath_conf_t *conf = mddev->private;
	int i;

	seq_printf (seq, " [%d/%d] [", conf->raid_disks,
		    conf->working_disks);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf (seq, "%s",
			    conf->multipaths[i].rdev &&
			    test_bit(In_sync, &conf->multipaths[i].rdev->flags) ?
				"U" : "_");
	seq_printf (seq, "]");
}

static int multipath_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	multipath_conf_t *conf = mddev->private;
	int i, ret = 0;

	if (mddev_congested(mddev, bits))
		return 1;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks ; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			ret |= bdi_congested(&q->backing_dev_info, bits);
			/* Just like multipath_map, we just check the
			 * first available device
			 */
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * Careful, this can execute in IRQ contexts as well!
 */
static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
{
	multipath_conf_t *conf = mddev->private;

	if (conf->working_disks <= 1) {
		/*
		 * Uh oh, we can do nothing if this is our last path, but
		 * first check if this is a queued request for a device
		 * which has just failed.
		 */
		printk(KERN_ALERT
		       "multipath: only one IO path left and IO error.\n");
		/* leave it active... it's all we have */
	} else {
		/*
		 * Mark disk as unusable
		 */
		if (!test_bit(Faulty, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			clear_bit(In_sync, &rdev->flags);
			set_bit(Faulty, &rdev->flags);
			set_bit(MD_CHANGE_DEVS, &mddev->flags);
			conf->working_disks--;
			mddev->degraded++;
			printk(KERN_ALERT "multipath: IO failure on %s,"
			       " disabling IO path.\n"
			       "multipath: Operation continuing"
			       " on %d IO paths.\n",
			       bdevname (rdev->bdev,b),
			       conf->working_disks);
		}
	}
}

static void print_multipath_conf (multipath_conf_t *conf)
{
	int i;
	struct multipath_info *tmp;

	printk("MULTIPATH conf printout:\n");
	if (!conf) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(" --- wd:%d rd:%d\n", conf->working_disks,
	       conf->raid_disks);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->multipaths + i;
		if (tmp->rdev)
			printk(" disk%d, o:%d, dev:%s\n",
			       i,!test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev,b));
	}
}


static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	multipath_conf_t *conf = mddev->private;
	struct request_queue *q;
	int err = -EEXIST;
	int path;
	struct multipath_info *p;
	int first = 0;
	int last = mddev->raid_disks - 1;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	print_multipath_conf(conf);

	for (path = first; path <= last; path++)
		if ((p=conf->multipaths+path)->rdev == NULL) {
			q = rdev->bdev->bd_disk->queue;
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);

		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_segments to one, lying
		 * within a single page.
		 * (Note: it is very unlikely that a device with
		 * merge_bvec_fn will be involved in multipath.)
		 */
			if (q->merge_bvec_fn) {
				blk_queue_max_segments(mddev->queue, 1);
				blk_queue_segment_boundary(mddev->queue,
							   PAGE_CACHE_SIZE - 1);
			}

			conf->working_disks++;
			mddev->degraded--;
			rdev->raid_disk = path;
			set_bit(In_sync, &rdev->flags);
			rcu_assign_pointer(p->rdev, rdev);
			err = 0;
			md_integrity_add_rdev(rdev, mddev);
			break;
		}

	print_multipath_conf(conf);

	return err;
}

static int multipath_remove_disk(mddev_t *mddev, int number)
{
	multipath_conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	struct multipath_info *p = conf->multipaths + number;

	print_multipath_conf(conf);

	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			printk(KERN_ERR "hot-remove-disk, slot %d is identified"
			       " but is still operational!\n", number);
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
			goto abort;
		}
		md_integrity_register(mddev);
	}
abort:

	print_multipath_conf(conf);
	return err;
}



/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working multipaths.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */

static void multipathd (mddev_t *mddev)
{
	struct multipath_bh *mp_bh;
	struct bio *bio;
	unsigned long flags;
	multipath_conf_t *conf = mddev->private;
	struct list_head *head = &conf->retry_list;

	md_check_recovery(mddev);
	for (;;) {
		char b[BDEVNAME_SIZE];
		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head))
			break;
		mp_bh = list_entry(head->prev, struct multipath_bh, retry_list);
		list_del(head->prev);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		bio = &mp_bh->bio;
		bio->bi_sector = mp_bh->master_bio->bi_sector;

		if ((mp_bh->path = multipath_map (conf))<0) {
			printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
			       " error for block %llu\n",
			       bdevname(bio->bi_bdev,b),
			       (unsigned long long)bio->bi_sector);
			multipath_end_bh_io(mp_bh, -EIO);
		} else {
			printk(KERN_ERR "multipath: %s: redirecting sector %llu"
			       " to another IO path\n",
			       bdevname(bio->bi_bdev,b),
			       (unsigned long long)bio->bi_sector);
			*bio = *(mp_bh->master_bio);
			bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
			bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
			bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
			bio->bi_end_io = multipath_end_request;
			bio->bi_private = mp_bh;
			generic_make_request(bio);
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
}

static sector_t multipath_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	return mddev->dev_sectors;
}

static int multipath_run (mddev_t *mddev)
{
	multipath_conf_t *conf;
	int disk_idx;
	struct multipath_info *disk;
	mdk_rdev_t *rdev;

	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	if (mddev->level != LEVEL_MULTIPATH) {
		printk("multipath: %s: raid level not set to multipath IO (%d)\n",
		       mdname(mddev), mddev->level);
		goto out;
	}
	/*
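	 * multipath_run() is invoked when the array is started; here we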
	 * copy the already verified devices into our private MULTIPATH
	 * bookkeeping area. [whatever we allocate in multipath_run(),
	 * should be freed in multipath_stop()]
	 */
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

	conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
	mddev->private = conf;
	if (!conf) {
		printk(KERN_ERR
		       "multipath: couldn't allocate memory for %s\n",
		       mdname(mddev));
		goto out;
	}

	conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
				   GFP_KERNEL);
	if (!conf->multipaths) {
		printk(KERN_ERR
		       "multipath: couldn't allocate memory for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	conf->working_disks = 0;
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		disk_idx = rdev->raid_disk;
		if (disk_idx < 0 ||
		    disk_idx >= mddev->raid_disks)
			continue;

		disk = conf->multipaths + disk_idx;
		disk->rdev = rdev;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);

		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, not that we ever expect a device with
		 * a merge_bvec_fn to be involved in multipath */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
			blk_queue_max_segments(mddev->queue, 1);
			blk_queue_segment_boundary(mddev->queue,
						   PAGE_CACHE_SIZE - 1);
		}

		if (!test_bit(Faulty, &rdev->flags))
			conf->working_disks++;
	}

	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);

	if (!conf->working_disks) {
		printk(KERN_ERR "multipath: no operational IO paths for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}
	mddev->degraded = conf->raid_disks - conf->working_disks;

	conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
						 sizeof(struct multipath_bh));
	if (conf->pool == NULL) {
		printk(KERN_ERR
		       "multipath: couldn't allocate memory for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	{
		mddev->thread = md_register_thread(multipathd, mddev, NULL);
		if (!mddev->thread) {
			printk(KERN_ERR "multipath: couldn't allocate thread"
			       " for %s\n", mdname(mddev));
			goto out_free_conf;
		}
	}

	printk(KERN_INFO
	       "multipath: array %s active with %d out of %d IO paths\n",
	       mdname(mddev), conf->working_disks, mddev->raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));

	mddev->queue->unplug_fn = multipath_unplug;
	mddev->queue->backing_dev_info.congested_fn = multipath_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;
	md_integrity_register(mddev);
	return 0;

out_free_conf:
	if (conf->pool)
		mempool_destroy(conf->pool);
	kfree(conf->multipaths);
	kfree(conf);
	mddev->private = NULL;
out:
	return -EIO;
}


static int multipath_stop (mddev_t *mddev)
{
	multipath_conf_t *conf = mddev->private;

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	mempool_destroy(conf->pool);
	kfree(conf->multipaths);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

static struct mdk_personality multipath_personality =
{
	.name		= "multipath",
	.level		= LEVEL_MULTIPATH,
	.owner		= THIS_MODULE,
	.make_request	= multipath_make_request,
	.run		= multipath_run,
	.stop		= multipath_stop,
	.status		= multipath_status,
	.error_handler	= multipath_error,
	.hot_add_disk	= multipath_add_disk,
	.hot_remove_disk= multipath_remove_disk,
	.size		= multipath_size,
};

static int __init multipath_init (void)
{
	return register_md_personality (&multipath_personality);
}

static void __exit multipath_exit (void)
{
	unregister_md_personality (&multipath_personality);
}

module_init(multipath_init);
module_exit(multipath_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("simple multi-path personality for MD");
MODULE_ALIAS("md-personality-7"); /* MULTIPATH */
MODULE_ALIAS("md-multipath");
MODULE_ALIAS("md-level--4");
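
/*
 * Usage sketch (illustrative only; device names are placeholders): an md
 * multipath array over two paths to the same physical device is normally
 * assembled from user space with mdadm, e.g.
 *
 *	mdadm --create /dev/md0 --level=multipath --raid-devices=2 \
 *		/dev/sdb1 /dev/sdc1
 */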