/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))


#ifndef MODULE
static void autostart_arrays(int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_{min,max}
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
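/*
 * Illustrative tuning example (not from the original source): to
 * guarantee at least 50000 KB/sec of resync bandwidth on md0, either
 * knob named above can be written, e.g.
 *
 *	echo 50000 > /proc/sys/dev/raid/speed_limit_min
 *	echo 50000 > /sys/block/md0/md/sync_speed_min
 *
 * A per-array sysfs value of 0 means "use the system-wide sysctl",
 * which is exactly the fallback that speed_min()/speed_max() above
 * implement.
 */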
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static const struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning a reference
 * to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
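/*
 * Sketch of how for_each_mddev() is used (illustrative, not from the
 * original source).  The macro does the locking and refcounting
 * described above; the caller just supplies the two cursors:
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		... 'mddev' is valid here, with a reference held ...
 *	}
 *
 * Breaking out of the loop early leaves that reference held, so the
 * caller must then drop it with mddev_put(mddev) itself.
 */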
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static int md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	mddev_t *mddev = q->queuedata;
	int rv;
	int cpu;
	unsigned int sectors;

	if (mddev == NULL || mddev->pers == NULL
	    || !mddev->ready) {
		bio_io_error(bio);
		return 0;
	}
	smp_rmb(); /* Ensure implications of 'active' are visible */
	rcu_read_lock();
	if (mddev->suspended || mddev->barrier) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended && !mddev->barrier)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	/*
	 * save the sectors now since our bio can
	 * go away inside make_request
	 */
	sectors = bio_sectors(bio);
	rv = mddev->pers->make_request(mddev, bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
	part_stat_unlock();

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);

	return rv;
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once ->stop is called and completes, the module will be completely
 * unused.
 */
void mddev_suspend(mddev_t *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(mddev_t *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);
}
EXPORT_SYMBOL_GPL(mddev_resume);

int mddev_congested(mddev_t *mddev, int bits)
{
	if (mddev->barrier)
		return 1;
	return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);
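/*
 * Illustrative pairing of the two calls above (a sketch, not from the
 * original source): code that needs the array quiescent around a
 * reconfiguration would do
 *
 *	mddev_suspend(mddev);
 *	... no array IO is in flight here; rearrange internal state ...
 *	mddev_resume(mddev);
 *
 * mddev_suspend() only returns once active_io has drained to zero and
 * ->quiesce(mddev, 1) has been called, as implemented above.
 */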
/*
 * Generic barrier handling for md
 */

#define POST_REQUEST_BARRIER ((void*)1)

static void md_end_barrier(struct bio *bio, int err)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;
	if (err == -EOPNOTSUPP && mddev->barrier != POST_REQUEST_BARRIER)
		set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags);

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		if (mddev->barrier == POST_REQUEST_BARRIER) {
			/* This was a post-request barrier */
			mddev->barrier = NULL;
			wake_up(&mddev->sb_wait);
		} else
			/* The pre-request barrier has finished */
			schedule_work(&mddev->barrier_work);
	}
	bio_put(bio);
}

static void submit_barriers(mddev_t *mddev)
{
	mdk_rdev_t *rdev;

	rcu_read_lock();
	list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references: one is dropped
			 * when the request finishes, the other after
			 * we re-take the rcu read lock.
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc(GFP_KERNEL, 0);
			bi->bi_end_io = md_end_barrier;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_BARRIER, bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
}

static void md_submit_barrier(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, barrier_work);
	struct bio *bio = mddev->barrier;

	atomic_set(&mddev->flush_pending, 1);

	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		bio_endio(bio, -EOPNOTSUPP);
	else if (bio->bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio, 0);
	else {
		bio->bi_rw &= ~REQ_HARDBARRIER;
		if (mddev->pers->make_request(mddev, bio))
			generic_make_request(bio);
		mddev->barrier = POST_REQUEST_BARRIER;
		submit_barriers(mddev);
	}
	if (atomic_dec_and_test(&mddev->flush_pending)) {
		mddev->barrier = NULL;
		wake_up(&mddev->sb_wait);
	}
}

void md_barrier_request(mddev_t *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->barrier,
			    mddev->write_lock, /*nothing*/);
	mddev->barrier = bio;
	spin_unlock_irq(&mddev->write_lock);

	atomic_set(&mddev->flush_pending, 1);
	INIT_WORK(&mddev->barrier_work, md_submit_barrier);

	submit_barriers(mddev);

	if (atomic_dec_and_test(&mddev->flush_pending))
		schedule_work(&mddev->barrier_work);
}
EXPORT_SYMBOL(md_barrier_request);

/* Support for plugging.
 * This mirrors the plugging support in request_queue, but does not
 * require having a whole queue
 */
static void plugger_work(struct work_struct *work)
{
	struct plug_handle *plug =
		container_of(work, struct plug_handle, unplug_work);
	plug->unplug_fn(plug);
}
static void plugger_timeout(unsigned long data)
{
	struct plug_handle *plug = (void *)data;
	kblockd_schedule_work(NULL, &plug->unplug_work);
}
void plugger_init(struct plug_handle *plug,
		  void (*unplug_fn)(struct plug_handle *))
{
	plug->unplug_flag = 0;
	plug->unplug_fn = unplug_fn;
	init_timer(&plug->unplug_timer);
	plug->unplug_timer.function = plugger_timeout;
	plug->unplug_timer.data = (unsigned long)plug;
	INIT_WORK(&plug->unplug_work, plugger_work);
}
EXPORT_SYMBOL_GPL(plugger_init);

void plugger_set_plug(struct plug_handle *plug)
{
	if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag))
		mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1);
}
EXPORT_SYMBOL_GPL(plugger_set_plug);

int plugger_remove_plug(struct plug_handle *plug)
{
	if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) {
		del_timer(&plug->unplug_timer);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL_GPL(plugger_remove_plug);
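/*
 * Illustrative plugger usage (a sketch, not from the original source;
 * 'conf' and my_unplug() are hypothetical):
 *
 *	static void my_unplug(struct plug_handle *plug)
 *	{
 *		... kick any queued work for the array ...
 *	}
 *
 *	plugger_init(&conf->plug, my_unplug);
 *	plugger_set_plug(&conf->plug);	(arms an ~3ms unplug timer)
 *
 * plugger_remove_plug() disarms the timer early and returns whether
 * the plug was still set when it was called.
 */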
static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del(&mddev->all_mddevs);
		if (mddev->gendisk) {
			/* we did a probe so need to clean up.
			 * Call schedule_work inside the spinlock
			 * so that flush_scheduled_work() after
			 * mddev_find will succeed in waiting for the
			 * work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			schedule_work(&mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
}

void mddev_init(mddev_t *mddev)
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	init_timer(&mddev->safemode_timer);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->write_lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}

static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(mddev_t *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}
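/*
 * Illustrative locking pattern (a sketch, not from the original
 * source).  mddev_lock() above is interruptible, so callers check its
 * return value and pair a successful lock with mddev_unlock() below:
 *
 *	if (mddev_lock(mddev))
 *		return -EINTR;
 *	... reconfigure the array under reconfig_mutex ...
 *	mddev_unlock(mddev);
 */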
static struct attribute_group md_redundancy_group;

static void mddev_unlock(mddev_t * mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So hold sysfs_active set while the removal is happening,
		 * and anything else which might set ->to_remove or
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}

static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t num_sectors = bdev->bd_inode->i_size / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}
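/*
 * Worked example for calc_dev_sboffset() above (illustrative; it
 * relies on MD_NEW_SIZE_SECTORS() from md_p.h reserving the last
 * 64KiB-aligned 64KiB of the device for the 0.90 superblock): a
 * 1000000-sector device rounds down to sector 999936, a 64KiB
 * boundary, and the superblock starts one 64KiB block (128 sectors)
 * earlier, at sector 999808.
 */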
static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
}


static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

static void super_written_barrier(struct bio *bio, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
	} else {
		bio_put(bio2);
		bio->bi_private = rdev;
		super_written(bio, error);
	}
}

void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if REQ_HARDBARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
	bio->bi_rw = rw;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= REQ_HARDBARRIER;
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}

void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}

static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		 struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= REQ_SYNC | REQ_UNPLUG;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;


	if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;
	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}


static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}

static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);


#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}
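/*
 * Example of md_csum_fold() above (illustrative):
 * md_csum_fold(0x12345678) computes 0x5678 + 0x1234 = 0x68ac; the
 * second round of folding absorbs any carry out of bit 16, so the
 * result always fits in 16 bits.
 */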
/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Update the superblock for rdev with data in mddev
 *      This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
					  int minor_version);
	int		    (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		    (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
						sector_t num_sectors);
};

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(mddev_t *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;

	if (rdev->sectors < sb->size * 2 && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = sb->size * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
				sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL)
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;


	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);
	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}


/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
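	/*
	 * Worked example for minor_version 0 (illustrative): on a
	 * 1000000-sector device, sb_start = (1000000 - 16) & ~7 =
	 * 999984, i.e. 8K from the end of the device, rounded down
	 * to a 4K boundary.
	 */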
	switch(minor_version) {
	case 0:
		sb_start = rdev->bdev->bd_inode->i_size >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
			le64_to_cpu(sb->data_offset);
	else
		rdev->sectors = rdev->sb_start;
	if (rdev->sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	if (le64_to_cpu(sb->size) > rdev->sectors)
		return -EINVAL;
	return ret;
}

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL )
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
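/*
 * Summary of the on-disk dev_roles[] encoding used by
 * super_1_validate() above and super_1_sync() below (an illustrative
 * note, not in the original source):
 *
 *	0xffff	spare device
 *	0xfffe	faulty device
 *	other	the raid_disk slot this device fills
 */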
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
	}

	max_dev = 0;
	list_for_each_entry(rdev2, &mddev->disks, same_set)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	} else
		max_dev = le32_to_cpu(sb->max_dev);

	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}

static struct super_type super_types[] = {
	[0] = {
		.name		    = "0.90.0",
		.owner		    = THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
	},
	[1] = {
		.name		    = "md-1",
		.owner		    = THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
	},
};

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	mdk_rdev_t *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);
1787 */ 1788 if (blk_integrity_register(mddev->gendisk, 1789 bdev_get_integrity(reference->bdev)) != 0) { 1790 printk(KERN_ERR "md: failed to register integrity for %s\n", 1791 mdname(mddev)); 1792 return -EINVAL; 1793 } 1794 printk(KERN_NOTICE "md: data integrity on %s enabled\n", 1795 mdname(mddev)); 1796 return 0; 1797} 1798EXPORT_SYMBOL(md_integrity_register); 1799 1800/* Disable data integrity if non-capable/non-matching disk is being added */ 1801void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev) 1802{ 1803 struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev); 1804 struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk); 1805 1806 if (!bi_mddev) /* nothing to do */ 1807 return; 1808 if (rdev->raid_disk < 0) /* skip spares */ 1809 return; 1810 if (bi_rdev && blk_integrity_compare(mddev->gendisk, 1811 rdev->bdev->bd_disk) >= 0) 1812 return; 1813 printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev)); 1814 blk_integrity_unregister(mddev->gendisk); 1815} 1816EXPORT_SYMBOL(md_integrity_add_rdev); 1817 1818static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) 1819{ 1820 char b[BDEVNAME_SIZE]; 1821 struct kobject *ko; 1822 char *s; 1823 int err; 1824 1825 if (rdev->mddev) { 1826 MD_BUG(); 1827 return -EINVAL; 1828 } 1829 1830 /* prevent duplicates */ 1831 if (find_rdev(mddev, rdev->bdev->bd_dev)) 1832 return -EEXIST; 1833 1834 /* make sure rdev->sectors exceeds mddev->dev_sectors */ 1835 if (rdev->sectors && (mddev->dev_sectors == 0 || 1836 rdev->sectors < mddev->dev_sectors)) { 1837 if (mddev->pers) { 1838 /* Cannot change size, so fail 1839 * If mddev->level <= 0, then we don't care 1840 * about aligning sizes (e.g. linear) 1841 */ 1842 if (mddev->level > 0) 1843 return -ENOSPC; 1844 } else 1845 mddev->dev_sectors = rdev->sectors; 1846 } 1847 1848 /* Verify rdev->desc_nr is unique. 
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if (sysfs_create_link(&rdev->kobj, ko, "block"))
		/* failure here is OK */;
	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled = 0;

	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}
(mdk_rdev_t *)lock_rdev : rdev); 1945 if (err) { 1946 printk(KERN_ERR "md: could not bd_claim %s.\n", 1947 bdevname(bdev, b)); 1948 blkdev_put(bdev, FMODE_READ|FMODE_WRITE); 1949 return err; 1950 } 1951 if (!shared) 1952 set_bit(AllReserved, &rdev->flags); 1953 rdev->bdev = bdev; 1954 return err; 1955} 1956 1957static void unlock_rdev(mdk_rdev_t *rdev) 1958{ 1959 struct block_device *bdev = rdev->bdev; 1960 rdev->bdev = NULL; 1961 if (!bdev) 1962 MD_BUG(); 1963 bd_release(bdev); 1964 blkdev_put(bdev, FMODE_READ|FMODE_WRITE); 1965} 1966 1967void md_autodetect_dev(dev_t dev); 1968 1969static void export_rdev(mdk_rdev_t * rdev) 1970{ 1971 char b[BDEVNAME_SIZE]; 1972 printk(KERN_INFO "md: export_rdev(%s)\n", 1973 bdevname(rdev->bdev,b)); 1974 if (rdev->mddev) 1975 MD_BUG(); 1976 free_disk_sb(rdev); 1977#ifndef MODULE 1978 if (test_bit(AutoDetected, &rdev->flags)) 1979 md_autodetect_dev(rdev->bdev->bd_dev); 1980#endif 1981 unlock_rdev(rdev); 1982 kobject_put(&rdev->kobj); 1983} 1984 1985static void kick_rdev_from_array(mdk_rdev_t * rdev) 1986{ 1987 unbind_rdev_from_array(rdev); 1988 export_rdev(rdev); 1989} 1990 1991static void export_array(mddev_t *mddev) 1992{ 1993 mdk_rdev_t *rdev, *tmp; 1994 1995 rdev_for_each(rdev, tmp, mddev) { 1996 if (!rdev->mddev) { 1997 MD_BUG(); 1998 continue; 1999 } 2000 kick_rdev_from_array(rdev); 2001 } 2002 if (!list_empty(&mddev->disks)) 2003 MD_BUG(); 2004 mddev->raid_disks = 0; 2005 mddev->major_version = 0; 2006} 2007 2008static void print_desc(mdp_disk_t *desc) 2009{ 2010 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number, 2011 desc->major,desc->minor,desc->raid_disk,desc->state); 2012} 2013 2014static void print_sb_90(mdp_super_t *sb) 2015{ 2016 int i; 2017 2018 printk(KERN_INFO 2019 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n", 2020 sb->major_version, sb->minor_version, sb->patch_version, 2021 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3, 2022 sb->ctime); 2023 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n", 2024 sb->level, sb->size, sb->nr_disks, sb->raid_disks, 2025 sb->md_minor, sb->layout, sb->chunk_size); 2026 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d" 2027 " FD:%d SD:%d CSUM:%08x E:%08lx\n", 2028 sb->utime, sb->state, sb->active_disks, sb->working_disks, 2029 sb->failed_disks, sb->spare_disks, 2030 sb->sb_csum, (unsigned long)sb->events_lo); 2031 2032 printk(KERN_INFO); 2033 for (i = 0; i < MD_SB_DISKS; i++) { 2034 mdp_disk_t *desc; 2035 2036 desc = sb->disks + i; 2037 if (desc->number || desc->major || desc->minor || 2038 desc->raid_disk || (desc->state && (desc->state != 4))) { 2039 printk(" D %2d: ", i); 2040 print_desc(desc); 2041 } 2042 } 2043 printk(KERN_INFO "md: THIS: "); 2044 print_desc(&sb->this_disk); 2045} 2046 2047static void print_sb_1(struct mdp_superblock_1 *sb) 2048{ 2049 __u8 *uuid; 2050 2051 uuid = sb->set_uuid; 2052 printk(KERN_INFO 2053 "md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n" 2054 "md: Name: \"%s\" CT:%llu\n", 2055 le32_to_cpu(sb->major_version), 2056 le32_to_cpu(sb->feature_map), 2057 uuid, 2058 sb->set_name, 2059 (unsigned long long)le64_to_cpu(sb->ctime) 2060 & MD_SUPERBLOCK_1_TIME_SEC_MASK); 2061 2062 uuid = sb->device_uuid; 2063 printk(KERN_INFO 2064 "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu" 2065 " RO:%llu\n" 2066 "md: Dev:%08x UUID: %pU\n" 2067 "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n" 2068 "md: (MaxDev:%u) \n", 2069 le32_to_cpu(sb->level), 2070 (unsigned long long)le64_to_cpu(sb->size), 2071 le32_to_cpu(sb->raid_disks), 
2072 le32_to_cpu(sb->layout), 2073 le32_to_cpu(sb->chunksize), 2074 (unsigned long long)le64_to_cpu(sb->data_offset), 2075 (unsigned long long)le64_to_cpu(sb->data_size), 2076 (unsigned long long)le64_to_cpu(sb->super_offset), 2077 (unsigned long long)le64_to_cpu(sb->recovery_offset), 2078 le32_to_cpu(sb->dev_number), 2079 uuid, 2080 sb->devflags, 2081 (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK, 2082 (unsigned long long)le64_to_cpu(sb->events), 2083 (unsigned long long)le64_to_cpu(sb->resync_offset), 2084 le32_to_cpu(sb->sb_csum), 2085 le32_to_cpu(sb->max_dev) 2086 ); 2087} 2088 2089static void print_rdev(mdk_rdev_t *rdev, int major_version) 2090{ 2091 char b[BDEVNAME_SIZE]; 2092 printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n", 2093 bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors, 2094 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags), 2095 rdev->desc_nr); 2096 if (rdev->sb_loaded) { 2097 printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version); 2098 switch (major_version) { 2099 case 0: 2100 print_sb_90((mdp_super_t*)page_address(rdev->sb_page)); 2101 break; 2102 case 1: 2103 print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page)); 2104 break; 2105 } 2106 } else 2107 printk(KERN_INFO "md: no rdev superblock!\n"); 2108} 2109 2110static void md_print_devices(void) 2111{ 2112 struct list_head *tmp; 2113 mdk_rdev_t *rdev; 2114 mddev_t *mddev; 2115 char b[BDEVNAME_SIZE]; 2116 2117 printk("\n"); 2118 printk("md: **********************************\n"); 2119 printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n"); 2120 printk("md: **********************************\n"); 2121 for_each_mddev(mddev, tmp) { 2122 2123 if (mddev->bitmap) 2124 bitmap_print_sb(mddev->bitmap); 2125 else 2126 printk("%s: ", mdname(mddev)); 2127 list_for_each_entry(rdev, &mddev->disks, same_set) 2128 printk("<%s>", bdevname(rdev->bdev,b)); 2129 printk("\n"); 2130 2131 list_for_each_entry(rdev, &mddev->disks, same_set) 2132 print_rdev(rdev, mddev->major_version); 2133 } 2134 printk("md: **********************************\n"); 2135 printk("\n"); 2136} 2137 2138 2139static void sync_sbs(mddev_t * mddev, int nospares) 2140{ 2141 /* Update each superblock (in-memory image), but 2142 * if we are allowed to, skip spares which already 2143 * have the right event counter, or have one earlier 2144 * (which would mean they aren't being marked as dirty 2145 * with the rest of the array) 2146 */ 2147 mdk_rdev_t *rdev; 2148 list_for_each_entry(rdev, &mddev->disks, same_set) { 2149 if (rdev->sb_events == mddev->events || 2150 (nospares && 2151 rdev->raid_disk < 0 && 2152 rdev->sb_events+1 == mddev->events)) { 2153 /* Don't update this superblock */ 2154 rdev->sb_loaded = 2; 2155 } else { 2156 super_types[mddev->major_version]. 
2157 sync_super(mddev, rdev); 2158 rdev->sb_loaded = 1; 2159 } 2160 } 2161} 2162 2163static void md_update_sb(mddev_t * mddev, int force_change) 2164{ 2165 mdk_rdev_t *rdev; 2166 int sync_req; 2167 int nospares = 0; 2168 2169repeat: 2170 /* First make sure individual recovery_offsets are correct */ 2171 list_for_each_entry(rdev, &mddev->disks, same_set) { 2172 if (rdev->raid_disk >= 0 && 2173 mddev->delta_disks >= 0 && 2174 !test_bit(In_sync, &rdev->flags) && 2175 mddev->curr_resync_completed > rdev->recovery_offset) 2176 rdev->recovery_offset = mddev->curr_resync_completed; 2177 2178 } 2179 if (!mddev->persistent) { 2180 clear_bit(MD_CHANGE_CLEAN, &mddev->flags); 2181 clear_bit(MD_CHANGE_DEVS, &mddev->flags); 2182 if (!mddev->external) 2183 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2184 wake_up(&mddev->sb_wait); 2185 return; 2186 } 2187 2188 spin_lock_irq(&mddev->write_lock); 2189 2190 mddev->utime = get_seconds(); 2191 2192 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) 2193 force_change = 1; 2194 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) 2195 /* just a clean <-> dirty transition, possibly leave spares alone, 2196 * though if the event count isn't the right even/odd value, we will 2197 * have to update the spares after all 2198 */ 2199 nospares = 1; 2200 if (force_change) 2201 nospares = 0; 2202 if (mddev->degraded) 2203 /* If the array is degraded, then skipping spares is both 2204 * dangerous and fairly pointless. 2205 * Dangerous because a device that was removed from the array 2206 * might have an event_count that still looks up-to-date, 2207 * so it can be re-added without a resync. 2208 * Pointless because if there are any spares to skip, 2209 * then a recovery will happen and soon that array won't 2210 * be degraded any more and the spare can go back to sleep then. 2211 */ 2212 nospares = 0; 2213 2214 sync_req = mddev->in_sync; 2215 2216 /* If this is just a dirty<->clean transition, and the array is clean 2217 * and 'events' is odd, we can roll back to the previous clean state */ 2218 if (nospares 2219 && (mddev->in_sync && mddev->recovery_cp == MaxSector) 2220 && mddev->can_decrease_events 2221 && mddev->events != 1) { 2222 mddev->events--; 2223 mddev->can_decrease_events = 0; 2224 } else { 2225 /* otherwise we have to go forward and ... */ 2226 mddev->events ++; 2227 mddev->can_decrease_events = nospares; 2228 } 2229 2230 if (!mddev->events) { 2231 /* 2232 * oops, this 64-bit counter should never wrap.
2233 * Either we are in around ~1 trillion A.C., assuming 2234 * 1 reboot per second, or we have a bug: 2235 */ 2236 MD_BUG(); 2237 mddev->events --; 2238 } 2239 sync_sbs(mddev, nospares); 2240 spin_unlock_irq(&mddev->write_lock); 2241 2242 dprintk(KERN_INFO 2243 "md: updating %s RAID superblock on device (in sync %d)\n", 2244 mdname(mddev),mddev->in_sync); 2245 2246 bitmap_update_sb(mddev->bitmap); 2247 list_for_each_entry(rdev, &mddev->disks, same_set) { 2248 char b[BDEVNAME_SIZE]; 2249 dprintk(KERN_INFO "md: "); 2250 if (rdev->sb_loaded != 1) 2251 continue; /* no noise on spare devices */ 2252 if (test_bit(Faulty, &rdev->flags)) 2253 dprintk("(skipping faulty "); 2254 2255 dprintk("%s ", bdevname(rdev->bdev,b)); 2256 if (!test_bit(Faulty, &rdev->flags)) { 2257 md_super_write(mddev,rdev, 2258 rdev->sb_start, rdev->sb_size, 2259 rdev->sb_page); 2260 dprintk(KERN_INFO "(write) %s's sb offset: %llu\n", 2261 bdevname(rdev->bdev,b), 2262 (unsigned long long)rdev->sb_start); 2263 rdev->sb_events = mddev->events; 2264 2265 } else 2266 dprintk(")\n"); 2267 if (mddev->level == LEVEL_MULTIPATH) 2268 /* only need to write one superblock... */ 2269 break; 2270 } 2271 md_super_wait(mddev); 2272 /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */ 2273 2274 spin_lock_irq(&mddev->write_lock); 2275 if (mddev->in_sync != sync_req || 2276 test_bit(MD_CHANGE_DEVS, &mddev->flags)) { 2277 /* have to write it out again */ 2278 spin_unlock_irq(&mddev->write_lock); 2279 goto repeat; 2280 } 2281 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2282 spin_unlock_irq(&mddev->write_lock); 2283 wake_up(&mddev->sb_wait); 2284 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 2285 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 2286 2287} 2288 2289/* words written to sysfs files may, or may not, be \n terminated. 2290 * We want to accept either way. For this we use cmd_match. 2291 */ 2292static int cmd_match(const char *cmd, const char *str) 2293{ 2294 /* See if cmd, written into a sysfs file, matches 2295 * str.
They must either be the same, or cmd can 2296 * have a trailing newline 2297 */ 2298 while (*cmd && *str && *cmd == *str) { 2299 cmd++; 2300 str++; 2301 } 2302 if (*cmd == '\n') 2303 cmd++; 2304 if (*str || *cmd) 2305 return 0; 2306 return 1; 2307} 2308 2309struct rdev_sysfs_entry { 2310 struct attribute attr; 2311 ssize_t (*show)(mdk_rdev_t *, char *); 2312 ssize_t (*store)(mdk_rdev_t *, const char *, size_t); 2313}; 2314 2315static ssize_t 2316state_show(mdk_rdev_t *rdev, char *page) 2317{ 2318 char *sep = ""; 2319 size_t len = 0; 2320 2321 if (test_bit(Faulty, &rdev->flags)) { 2322 len+= sprintf(page+len, "%sfaulty",sep); 2323 sep = ","; 2324 } 2325 if (test_bit(In_sync, &rdev->flags)) { 2326 len += sprintf(page+len, "%sin_sync",sep); 2327 sep = ","; 2328 } 2329 if (test_bit(WriteMostly, &rdev->flags)) { 2330 len += sprintf(page+len, "%swrite_mostly",sep); 2331 sep = ","; 2332 } 2333 if (test_bit(Blocked, &rdev->flags)) { 2334 len += sprintf(page+len, "%sblocked", sep); 2335 sep = ","; 2336 } 2337 if (!test_bit(Faulty, &rdev->flags) && 2338 !test_bit(In_sync, &rdev->flags)) { 2339 len += sprintf(page+len, "%sspare", sep); 2340 sep = ","; 2341 } 2342 return len+sprintf(page+len, "\n"); 2343} 2344 2345static ssize_t 2346state_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2347{ 2348 /* can write 2349 * faulty - simulates an error 2350 * remove - disconnects the device 2351 * writemostly - sets write_mostly 2352 * -writemostly - clears write_mostly 2353 * blocked - sets the Blocked flag 2354 * -blocked - clears the Blocked flag 2355 * insync - sets In_sync, provided the device isn't active 2356 */ 2357 int err = -EINVAL; 2358 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2359 md_error(rdev->mddev, rdev); 2360 err = 0; 2361 } else if (cmd_match(buf, "remove")) { 2362 if (rdev->raid_disk >= 0) 2363 err = -EBUSY; 2364 else { 2365 mddev_t *mddev = rdev->mddev; 2366 kick_rdev_from_array(rdev); 2367 if (mddev->pers) 2368 md_update_sb(mddev, 1); 2369 md_new_event(mddev); 2370 err = 0; 2371 } 2372 } else if (cmd_match(buf, "writemostly")) { 2373 set_bit(WriteMostly, &rdev->flags); 2374 err = 0; 2375 } else if (cmd_match(buf, "-writemostly")) { 2376 clear_bit(WriteMostly, &rdev->flags); 2377 err = 0; 2378 } else if (cmd_match(buf, "blocked")) { 2379 set_bit(Blocked, &rdev->flags); 2380 err = 0; 2381 } else if (cmd_match(buf, "-blocked")) { 2382 clear_bit(Blocked, &rdev->flags); 2383 wake_up(&rdev->blocked_wait); 2384 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2385 md_wakeup_thread(rdev->mddev->thread); 2386 2387 err = 0; 2388 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 2389 set_bit(In_sync, &rdev->flags); 2390 err = 0; 2391 } 2392 if (!err) 2393 sysfs_notify_dirent_safe(rdev->sysfs_state); 2394 return err ?
err : len; 2395} 2396static struct rdev_sysfs_entry rdev_state = 2397__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); 2398 2399static ssize_t 2400errors_show(mdk_rdev_t *rdev, char *page) 2401{ 2402 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 2403} 2404 2405static ssize_t 2406errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2407{ 2408 char *e; 2409 unsigned long n = simple_strtoul(buf, &e, 10); 2410 if (*buf && (*e == 0 || *e == '\n')) { 2411 atomic_set(&rdev->corrected_errors, n); 2412 return len; 2413 } 2414 return -EINVAL; 2415} 2416static struct rdev_sysfs_entry rdev_errors = 2417__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 2418 2419static ssize_t 2420slot_show(mdk_rdev_t *rdev, char *page) 2421{ 2422 if (rdev->raid_disk < 0) 2423 return sprintf(page, "none\n"); 2424 else 2425 return sprintf(page, "%d\n", rdev->raid_disk); 2426} 2427 2428static ssize_t 2429slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2430{ 2431 char *e; 2432 int err; 2433 char nm[20]; 2434 int slot = simple_strtoul(buf, &e, 10); 2435 if (strncmp(buf, "none", 4)==0) 2436 slot = -1; 2437 else if (e==buf || (*e && *e!= '\n')) 2438 return -EINVAL; 2439 if (rdev->mddev->pers && slot == -1) { 2440 /* Setting 'slot' on an active array requires also 2441 * updating the 'rd%d' link, and communicating 2442 * with the personality with ->hot_*_disk. 2443 * For now we only support removing 2444 * failed/spare devices. This normally happens automatically, 2445 * but not when the metadata is externally managed. 2446 */ 2447 if (rdev->raid_disk == -1) 2448 return -EEXIST; 2449 /* personality does all needed checks */ 2450 if (rdev->mddev->pers->hot_add_disk == NULL) 2451 return -EINVAL; 2452 err = rdev->mddev->pers-> 2453 hot_remove_disk(rdev->mddev, rdev->raid_disk); 2454 if (err) 2455 return err; 2456 sprintf(nm, "rd%d", rdev->raid_disk); 2457 sysfs_remove_link(&rdev->mddev->kobj, nm); 2458 rdev->raid_disk = -1; 2459 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2460 md_wakeup_thread(rdev->mddev->thread); 2461 } else if (rdev->mddev->pers) { 2462 mdk_rdev_t *rdev2; 2463 /* Activating a spare .. or possibly reactivating 2464 * if we ever get bitmaps working here. 2465 */ 2466 2467 if (rdev->raid_disk != -1) 2468 return -EBUSY; 2469 2470 if (rdev->mddev->pers->hot_add_disk == NULL) 2471 return -EINVAL; 2472 2473 list_for_each_entry(rdev2, &rdev->mddev->disks, same_set) 2474 if (rdev2->raid_disk == slot) 2475 return -EEXIST; 2476 2477 rdev->raid_disk = slot; 2478 if (test_bit(In_sync, &rdev->flags)) 2479 rdev->saved_raid_disk = slot; 2480 else 2481 rdev->saved_raid_disk = -1; 2482 err = rdev->mddev->pers-> 2483 hot_add_disk(rdev->mddev, rdev); 2484 if (err) { 2485 rdev->raid_disk = -1; 2486 return err; 2487 } else 2488 sysfs_notify_dirent_safe(rdev->sysfs_state); 2489 sprintf(nm, "rd%d", rdev->raid_disk); 2490 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm)) 2491 /* failure here is OK */; 2492 /* don't wakeup anyone, leave that to userspace. 
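 * An illustrative shell sequence (device names assumed, not taken from
 * this code): asking a spare to take slot 2 of md0 would look like
 *   echo 2 > /sys/block/md0/md/dev-sdc1/slot
 * which arrives here and ends in ->hot_add_disk() plus the rd2 link.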
*/ 2493 } else { 2494 if (slot >= rdev->mddev->raid_disks) 2495 return -ENOSPC; 2496 rdev->raid_disk = slot; 2497 /* assume it is working */ 2498 clear_bit(Faulty, &rdev->flags); 2499 clear_bit(WriteMostly, &rdev->flags); 2500 set_bit(In_sync, &rdev->flags); 2501 sysfs_notify_dirent_safe(rdev->sysfs_state); 2502 } 2503 return len; 2504} 2505 2506 2507static struct rdev_sysfs_entry rdev_slot = 2508__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 2509 2510static ssize_t 2511offset_show(mdk_rdev_t *rdev, char *page) 2512{ 2513 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 2514} 2515 2516static ssize_t 2517offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2518{ 2519 char *e; 2520 unsigned long long offset = simple_strtoull(buf, &e, 10); 2521 if (e==buf || (*e && *e != '\n')) 2522 return -EINVAL; 2523 if (rdev->mddev->pers && rdev->raid_disk >= 0) 2524 return -EBUSY; 2525 if (rdev->sectors && rdev->mddev->external) 2526 /* Must set offset before size, so overlap checks 2527 * can be sane */ 2528 return -EBUSY; 2529 rdev->data_offset = offset; 2530 return len; 2531} 2532 2533static struct rdev_sysfs_entry rdev_offset = 2534__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 2535 2536static ssize_t 2537rdev_size_show(mdk_rdev_t *rdev, char *page) 2538{ 2539 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); 2540} 2541 2542static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2) 2543{ 2544 /* check if two start/length pairs overlap */ 2545 if (s1+l1 <= s2) 2546 return 0; 2547 if (s2+l2 <= s1) 2548 return 0; 2549 return 1; 2550} 2551 2552static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) 2553{ 2554 unsigned long long blocks; 2555 sector_t new; 2556 2557 if (strict_strtoull(buf, 10, &blocks) < 0) 2558 return -EINVAL; 2559 2560 if (blocks & 1ULL << (8 * sizeof(blocks) - 1)) 2561 return -EINVAL; /* sector conversion overflow */ 2562 2563 new = blocks * 2; 2564 if (new != blocks * 2) 2565 return -EINVAL; /* unsigned long long to sector_t overflow */ 2566 2567 *sectors = new; 2568 return 0; 2569} 2570 2571static ssize_t 2572rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2573{ 2574 mddev_t *my_mddev = rdev->mddev; 2575 sector_t oldsectors = rdev->sectors; 2576 sector_t sectors; 2577 2578 if (strict_blocks_to_sectors(buf, &sectors) < 0) 2579 return -EINVAL; 2580 if (my_mddev->pers && rdev->raid_disk >= 0) { 2581 if (my_mddev->persistent) { 2582 sectors = super_types[my_mddev->major_version]. 2583 rdev_size_change(rdev, sectors); 2584 if (!sectors) 2585 return -EBUSY; 2586 } else if (!sectors) 2587 sectors = (rdev->bdev->bd_inode->i_size >> 9) - 2588 rdev->data_offset; 2589 } 2590 if (sectors < my_mddev->dev_sectors) 2591 return -EINVAL; /* component must fit device */ 2592 2593 rdev->sectors = sectors; 2594 if (sectors > oldsectors && my_mddev->external) { 2595 /* need to check that all other rdevs with the same ->bdev 2596 * do not overlap. We need to unlock the mddev to avoid 2597 * a deadlock. We have already changed rdev->sectors, and if 2598 * we have to change it back, we will have the lock again.
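 * (overlaps() above treats each rdev as the half-open range
 * [data_offset, data_offset + sectors): for example, ranges starting at
 * sectors 0 and 100 with lengths 200 and 50 overlap, while lengths 100
 * and 50 would merely touch and be fine.)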
2599 */ 2600 mddev_t *mddev; 2601 int overlap = 0; 2602 struct list_head *tmp; 2603 2604 mddev_unlock(my_mddev); 2605 for_each_mddev(mddev, tmp) { 2606 mdk_rdev_t *rdev2; 2607 2608 mddev_lock(mddev); 2609 list_for_each_entry(rdev2, &mddev->disks, same_set) 2610 if (test_bit(AllReserved, &rdev2->flags) || 2611 (rdev->bdev == rdev2->bdev && 2612 rdev != rdev2 && 2613 overlaps(rdev->data_offset, rdev->sectors, 2614 rdev2->data_offset, 2615 rdev2->sectors))) { 2616 overlap = 1; 2617 break; 2618 } 2619 mddev_unlock(mddev); 2620 if (overlap) { 2621 mddev_put(mddev); 2622 break; 2623 } 2624 } 2625 mddev_lock(my_mddev); 2626 if (overlap) { 2627 /* Someone else could have slipped in a size 2628 * change here, but doing so is just silly. 2629 * We put oldsectors back because we *know* it is 2630 * safe, and trust userspace not to race with 2631 * itself 2632 */ 2633 rdev->sectors = oldsectors; 2634 return -EBUSY; 2635 } 2636 } 2637 return len; 2638} 2639 2640static struct rdev_sysfs_entry rdev_size = 2641__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 2642 2643 2644static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page) 2645{ 2646 unsigned long long recovery_start = rdev->recovery_offset; 2647 2648 if (test_bit(In_sync, &rdev->flags) || 2649 recovery_start == MaxSector) 2650 return sprintf(page, "none\n"); 2651 2652 return sprintf(page, "%llu\n", recovery_start); 2653} 2654 2655static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2656{ 2657 unsigned long long recovery_start; 2658 2659 if (cmd_match(buf, "none")) 2660 recovery_start = MaxSector; 2661 else if (strict_strtoull(buf, 10, &recovery_start)) 2662 return -EINVAL; 2663 2664 if (rdev->mddev->pers && 2665 rdev->raid_disk >= 0) 2666 return -EBUSY; 2667 2668 rdev->recovery_offset = recovery_start; 2669 if (recovery_start == MaxSector) 2670 set_bit(In_sync, &rdev->flags); 2671 else 2672 clear_bit(In_sync, &rdev->flags); 2673 return len; 2674} 2675 2676static struct rdev_sysfs_entry rdev_recovery_start = 2677__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); 2678 2679static struct attribute *rdev_default_attrs[] = { 2680 &rdev_state.attr, 2681 &rdev_errors.attr, 2682 &rdev_slot.attr, 2683 &rdev_offset.attr, 2684 &rdev_size.attr, 2685 &rdev_recovery_start.attr, 2686 NULL, 2687}; 2688static ssize_t 2689rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 2690{ 2691 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2692 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 2693 mddev_t *mddev = rdev->mddev; 2694 ssize_t rv; 2695 2696 if (!entry->show) 2697 return -EIO; 2698 2699 rv = mddev ? mddev_lock(mddev) : -EBUSY; 2700 if (!rv) { 2701 if (rdev->mddev == NULL) 2702 rv = -EBUSY; 2703 else 2704 rv = entry->show(rdev, page); 2705 mddev_unlock(mddev); 2706 } 2707 return rv; 2708} 2709 2710static ssize_t 2711rdev_attr_store(struct kobject *kobj, struct attribute *attr, 2712 const char *page, size_t length) 2713{ 2714 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2715 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 2716 ssize_t rv; 2717 mddev_t *mddev = rdev->mddev; 2718 2719 if (!entry->store) 2720 return -EIO; 2721 if (!capable(CAP_SYS_ADMIN)) 2722 return -EACCES; 2723 rv = mddev ? 
mddev_lock(mddev): -EBUSY; 2724 if (!rv) { 2725 if (rdev->mddev == NULL) 2726 rv = -EBUSY; 2727 else 2728 rv = entry->store(rdev, page, length); 2729 mddev_unlock(mddev); 2730 } 2731 return rv; 2732} 2733 2734static void rdev_free(struct kobject *ko) 2735{ 2736 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); 2737 kfree(rdev); 2738} 2739static const struct sysfs_ops rdev_sysfs_ops = { 2740 .show = rdev_attr_show, 2741 .store = rdev_attr_store, 2742}; 2743static struct kobj_type rdev_ktype = { 2744 .release = rdev_free, 2745 .sysfs_ops = &rdev_sysfs_ops, 2746 .default_attrs = rdev_default_attrs, 2747}; 2748 2749void md_rdev_init(mdk_rdev_t *rdev) 2750{ 2751 rdev->desc_nr = -1; 2752 rdev->saved_raid_disk = -1; 2753 rdev->raid_disk = -1; 2754 rdev->flags = 0; 2755 rdev->data_offset = 0; 2756 rdev->sb_events = 0; 2757 rdev->last_read_error.tv_sec = 0; 2758 rdev->last_read_error.tv_nsec = 0; 2759 atomic_set(&rdev->nr_pending, 0); 2760 atomic_set(&rdev->read_errors, 0); 2761 atomic_set(&rdev->corrected_errors, 0); 2762 2763 INIT_LIST_HEAD(&rdev->same_set); 2764 init_waitqueue_head(&rdev->blocked_wait); 2765} 2766EXPORT_SYMBOL_GPL(md_rdev_init); 2767/* 2768 * Import a device. If 'super_format' >= 0, then sanity check the superblock 2769 * 2770 * mark the device faulty if: 2771 * 2772 * - the device is nonexistent (zero size) 2773 * - the device has no valid superblock 2774 * 2775 * a faulty rdev _never_ has rdev->sb set. 2776 */ 2777static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor) 2778{ 2779 char b[BDEVNAME_SIZE]; 2780 int err; 2781 mdk_rdev_t *rdev; 2782 sector_t size; 2783 2784 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 2785 if (!rdev) { 2786 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 2787 return ERR_PTR(-ENOMEM); 2788 } 2789 2790 md_rdev_init(rdev); 2791 if ((err = alloc_disk_sb(rdev))) 2792 goto abort_free; 2793 2794 err = lock_rdev(rdev, newdev, super_format == -2); 2795 if (err) 2796 goto abort_free; 2797 2798 kobject_init(&rdev->kobj, &rdev_ktype); 2799 2800 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 2801 if (!size) { 2802 printk(KERN_WARNING 2803 "md: %s has zero or unknown size, marking faulty!\n", 2804 bdevname(rdev->bdev,b)); 2805 err = -EINVAL; 2806 goto abort_free; 2807 } 2808 2809 if (super_format >= 0) { 2810 err = super_types[super_format]. 2811 load_super(rdev, NULL, super_minor); 2812 if (err == -EINVAL) { 2813 printk(KERN_WARNING 2814 "md: %s does not have a valid v%d.%d " 2815 "superblock, not importing!\n", 2816 bdevname(rdev->bdev,b), 2817 super_format, super_minor); 2818 goto abort_free; 2819 } 2820 if (err < 0) { 2821 printk(KERN_WARNING 2822 "md: could not read %s's sb, not importing!\n", 2823 bdevname(rdev->bdev,b)); 2824 goto abort_free; 2825 } 2826 } 2827 2828 return rdev; 2829 2830abort_free: 2831 if (rdev->sb_page) { 2832 if (rdev->bdev) 2833 unlock_rdev(rdev); 2834 free_disk_sb(rdev); 2835 } 2836 kfree(rdev); 2837 return ERR_PTR(err); 2838} 2839 2840/* 2841 * Check a full RAID array for plausibility 2842 */ 2843 2844 2845static void analyze_sbs(mddev_t * mddev) 2846{ 2847 int i; 2848 mdk_rdev_t *rdev, *freshest, *tmp; 2849 char b[BDEVNAME_SIZE]; 2850 2851 freshest = NULL; 2852 rdev_for_each(rdev, tmp, mddev) 2853 switch (super_types[mddev->major_version]. 
2854 load_super(rdev, freshest, mddev->minor_version)) { 2855 case 1: 2856 freshest = rdev; 2857 break; 2858 case 0: 2859 break; 2860 default: 2861 printk( KERN_ERR \ 2862 "md: fatal superblock inconsistency in %s" 2863 " -- removing from array\n", 2864 bdevname(rdev->bdev,b)); 2865 kick_rdev_from_array(rdev); 2866 } 2867 2868 2869 super_types[mddev->major_version]. 2870 validate_super(mddev, freshest); 2871 2872 i = 0; 2873 rdev_for_each(rdev, tmp, mddev) { 2874 if (mddev->max_disks && 2875 (rdev->desc_nr >= mddev->max_disks || 2876 i > mddev->max_disks)) { 2877 printk(KERN_WARNING 2878 "md: %s: %s: only %d devices permitted\n", 2879 mdname(mddev), bdevname(rdev->bdev, b), 2880 mddev->max_disks); 2881 kick_rdev_from_array(rdev); 2882 continue; 2883 } 2884 if (rdev != freshest) 2885 if (super_types[mddev->major_version]. 2886 validate_super(mddev, rdev)) { 2887 printk(KERN_WARNING "md: kicking non-fresh %s" 2888 " from array!\n", 2889 bdevname(rdev->bdev,b)); 2890 kick_rdev_from_array(rdev); 2891 continue; 2892 } 2893 if (mddev->level == LEVEL_MULTIPATH) { 2894 rdev->desc_nr = i++; 2895 rdev->raid_disk = rdev->desc_nr; 2896 set_bit(In_sync, &rdev->flags); 2897 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) { 2898 rdev->raid_disk = -1; 2899 clear_bit(In_sync, &rdev->flags); 2900 } 2901 } 2902} 2903 2904/* Read a fixed-point number. 2905 * Numbers in sysfs attributes should be in "standard" units where 2906 * possible, so time should be in seconds. 2907 * However we internally use a much smaller unit such as 2908 * milliseconds or jiffies. 2909 * This function takes a decimal number with a possible fractional 2910 * component, and produces an integer which is the result of 2911 * multiplying that number by 10^'scale', 2912 * all without any floating-point arithmetic. 2913 */ 2914int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale) 2915{ 2916 unsigned long result = 0; 2917 long decimals = -1; 2918 while (isdigit(*cp) || (*cp == '.'
&& decimals < 0)) { 2919 if (*cp == '.') 2920 decimals = 0; 2921 else if (decimals < scale) { 2922 unsigned int value; 2923 value = *cp - '0'; 2924 result = result * 10 + value; 2925 if (decimals >= 0) 2926 decimals++; 2927 } 2928 cp++; 2929 } 2930 if (*cp == '\n') 2931 cp++; 2932 if (*cp) 2933 return -EINVAL; 2934 if (decimals < 0) 2935 decimals = 0; 2936 while (decimals < scale) { 2937 result *= 10; 2938 decimals ++; 2939 } 2940 *res = result; 2941 return 0; 2942} 2943 2944 2945static void md_safemode_timeout(unsigned long data); 2946 2947static ssize_t 2948safe_delay_show(mddev_t *mddev, char *page) 2949{ 2950 int msec = (mddev->safemode_delay*1000)/HZ; 2951 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); 2952} 2953static ssize_t 2954safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len) 2955{ 2956 unsigned long msec; 2957 2958 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0) 2959 return -EINVAL; 2960 if (msec == 0) 2961 mddev->safemode_delay = 0; 2962 else { 2963 unsigned long old_delay = mddev->safemode_delay; 2964 mddev->safemode_delay = (msec*HZ)/1000; 2965 if (mddev->safemode_delay == 0) 2966 mddev->safemode_delay = 1; 2967 if (mddev->safemode_delay < old_delay) 2968 md_safemode_timeout((unsigned long)mddev); 2969 } 2970 return len; 2971} 2972static struct md_sysfs_entry md_safe_delay = 2973__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); 2974 2975static ssize_t 2976level_show(mddev_t *mddev, char *page) 2977{ 2978 struct mdk_personality *p = mddev->pers; 2979 if (p) 2980 return sprintf(page, "%s\n", p->name); 2981 else if (mddev->clevel[0]) 2982 return sprintf(page, "%s\n", mddev->clevel); 2983 else if (mddev->level != LEVEL_NONE) 2984 return sprintf(page, "%d\n", mddev->level); 2985 else 2986 return 0; 2987} 2988 2989static ssize_t 2990level_store(mddev_t *mddev, const char *buf, size_t len) 2991{ 2992 char clevel[16]; 2993 ssize_t rv = len; 2994 struct mdk_personality *pers; 2995 long level; 2996 void *priv; 2997 mdk_rdev_t *rdev; 2998 2999 if (mddev->pers == NULL) { 3000 if (len == 0) 3001 return 0; 3002 if (len >= sizeof(mddev->clevel)) 3003 return -ENOSPC; 3004 strncpy(mddev->clevel, buf, len); 3005 if (mddev->clevel[len-1] == '\n') 3006 len--; 3007 mddev->clevel[len] = 0; 3008 mddev->level = LEVEL_NONE; 3009 return rv; 3010 } 3011 3012 /* request to change the personality. Need to ensure: 3013 * - array is not engaged in resync/recovery/reshape 3014 * - old personality can be suspended 3015 * - new personality will access other array. 
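 * For illustration only (array name and levels assumed): converting a
 * two-drive raid1 to raid5 could be requested with
 *   echo raid5 > /sys/block/md0/md/level
 * and only succeeds if the target personality's ->takeover() accepts
 * the current geometry.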
3016 */ 3017 3018 if (mddev->sync_thread || 3019 mddev->reshape_position != MaxSector || 3020 mddev->sysfs_active) 3021 return -EBUSY; 3022 3023 if (!mddev->pers->quiesce) { 3024 printk(KERN_WARNING "md: %s: %s does not support online personality change\n", 3025 mdname(mddev), mddev->pers->name); 3026 return -EINVAL; 3027 } 3028 3029 /* Now find the new personality */ 3030 if (len == 0 || len >= sizeof(clevel)) 3031 return -EINVAL; 3032 strncpy(clevel, buf, len); 3033 if (clevel[len-1] == '\n') 3034 len--; 3035 clevel[len] = 0; 3036 if (strict_strtol(clevel, 10, &level)) 3037 level = LEVEL_NONE; 3038 3039 if (request_module("md-%s", clevel) != 0) 3040 request_module("md-level-%s", clevel); 3041 spin_lock(&pers_lock); 3042 pers = find_pers(level, clevel); 3043 if (!pers || !try_module_get(pers->owner)) { 3044 spin_unlock(&pers_lock); 3045 printk(KERN_WARNING "md: personality %s not loaded\n", clevel); 3046 return -EINVAL; 3047 } 3048 spin_unlock(&pers_lock); 3049 3050 if (pers == mddev->pers) { 3051 /* Nothing to do! */ 3052 module_put(pers->owner); 3053 return rv; 3054 } 3055 if (!pers->takeover) { 3056 module_put(pers->owner); 3057 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n", 3058 mdname(mddev), clevel); 3059 return -EINVAL; 3060 } 3061 3062 list_for_each_entry(rdev, &mddev->disks, same_set) 3063 rdev->new_raid_disk = rdev->raid_disk; 3064 3065 /* ->takeover must set new_* and/or delta_disks 3066 * if it succeeds, and may set them when it fails. 3067 */ 3068 priv = pers->takeover(mddev); 3069 if (IS_ERR(priv)) { 3070 mddev->new_level = mddev->level; 3071 mddev->new_layout = mddev->layout; 3072 mddev->new_chunk_sectors = mddev->chunk_sectors; 3073 mddev->raid_disks -= mddev->delta_disks; 3074 mddev->delta_disks = 0; 3075 module_put(pers->owner); 3076 printk(KERN_WARNING "md: %s: %s would not accept array\n", 3077 mdname(mddev), clevel); 3078 return PTR_ERR(priv); 3079 } 3080 3081 /* Looks like we have a winner */ 3082 mddev_suspend(mddev); 3083 mddev->pers->stop(mddev); 3084 3085 if (mddev->pers->sync_request == NULL && 3086 pers->sync_request != NULL) { 3087 /* need to add the md_redundancy_group */ 3088 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 3089 printk(KERN_WARNING 3090 "md: cannot register extra attributes for %s\n", 3091 mdname(mddev)); 3092 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action"); 3093 } 3094 if (mddev->pers->sync_request != NULL && 3095 pers->sync_request == NULL) { 3096 /* need to remove the md_redundancy_group */ 3097 if (mddev->to_remove == NULL) 3098 mddev->to_remove = &md_redundancy_group; 3099 } 3100 3101 if (mddev->pers->sync_request == NULL && 3102 mddev->external) { 3103 /* We are converting from a no-redundancy array 3104 * to a redundancy array and metadata is managed 3105 * externally so we need to be sure that writes 3106 * won't block due to a need to transition 3107 * clean->dirty 3108 * until external management is started. 
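 * (The external manager signals readiness by writing 'active' to the
 * array_state attribute below, which clears MD_CHANGE_PENDING.)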
3109 */ 3110 mddev->in_sync = 0; 3111 mddev->safemode_delay = 0; 3112 mddev->safemode = 0; 3113 } 3114 3115 list_for_each_entry(rdev, &mddev->disks, same_set) { 3116 char nm[20]; 3117 if (rdev->raid_disk < 0) 3118 continue; 3119 if (rdev->new_raid_disk >= mddev->raid_disks) 3120 rdev->new_raid_disk = -1; 3121 if (rdev->new_raid_disk == rdev->raid_disk) 3122 continue; 3123 sprintf(nm, "rd%d", rdev->raid_disk); 3124 sysfs_remove_link(&mddev->kobj, nm); 3125 } 3126 list_for_each_entry(rdev, &mddev->disks, same_set) { 3127 if (rdev->raid_disk < 0) 3128 continue; 3129 if (rdev->new_raid_disk == rdev->raid_disk) 3130 continue; 3131 rdev->raid_disk = rdev->new_raid_disk; 3132 if (rdev->raid_disk < 0) 3133 clear_bit(In_sync, &rdev->flags); 3134 else { 3135 char nm[20]; 3136 sprintf(nm, "rd%d", rdev->raid_disk); 3137 if(sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) 3138 printk("md: cannot register %s for %s after level change\n", 3139 nm, mdname(mddev)); 3140 } 3141 } 3142 3143 module_put(mddev->pers->owner); 3144 mddev->pers = pers; 3145 mddev->private = priv; 3146 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 3147 mddev->level = mddev->new_level; 3148 mddev->layout = mddev->new_layout; 3149 mddev->chunk_sectors = mddev->new_chunk_sectors; 3150 mddev->delta_disks = 0; 3151 if (mddev->pers->sync_request == NULL) { 3152 /* this is now an array without redundancy, so 3153 * it must always be in_sync 3154 */ 3155 mddev->in_sync = 1; 3156 del_timer_sync(&mddev->safemode_timer); 3157 } 3158 pers->run(mddev); 3159 mddev_resume(mddev); 3160 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3161 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3162 md_wakeup_thread(mddev->thread); 3163 sysfs_notify(&mddev->kobj, NULL, "level"); 3164 md_new_event(mddev); 3165 return rv; 3166} 3167 3168static struct md_sysfs_entry md_level = 3169__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 3170 3171 3172static ssize_t 3173layout_show(mddev_t *mddev, char *page) 3174{ 3175 /* just a number, not meaningful for all levels */ 3176 if (mddev->reshape_position != MaxSector && 3177 mddev->layout != mddev->new_layout) 3178 return sprintf(page, "%d (%d)\n", 3179 mddev->new_layout, mddev->layout); 3180 return sprintf(page, "%d\n", mddev->layout); 3181} 3182 3183static ssize_t 3184layout_store(mddev_t *mddev, const char *buf, size_t len) 3185{ 3186 char *e; 3187 unsigned long n = simple_strtoul(buf, &e, 10); 3188 3189 if (!*buf || (*e && *e != '\n')) 3190 return -EINVAL; 3191 3192 if (mddev->pers) { 3193 int err; 3194 if (mddev->pers->check_reshape == NULL) 3195 return -EBUSY; 3196 mddev->new_layout = n; 3197 err = mddev->pers->check_reshape(mddev); 3198 if (err) { 3199 mddev->new_layout = mddev->layout; 3200 return err; 3201 } 3202 } else { 3203 mddev->new_layout = n; 3204 if (mddev->reshape_position == MaxSector) 3205 mddev->layout = n; 3206 } 3207 return len; 3208} 3209static struct md_sysfs_entry md_layout = 3210__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 3211 3212 3213static ssize_t 3214raid_disks_show(mddev_t *mddev, char *page) 3215{ 3216 if (mddev->raid_disks == 0) 3217 return 0; 3218 if (mddev->reshape_position != MaxSector && 3219 mddev->delta_disks != 0) 3220 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 3221 mddev->raid_disks - mddev->delta_disks); 3222 return sprintf(page, "%d\n", mddev->raid_disks); 3223} 3224 3225static int update_raid_disks(mddev_t *mddev, int raid_disks); 3226 3227static ssize_t 3228raid_disks_store(mddev_t *mddev, const char *buf, size_t len) 3229{ 3230 char *e; 
3231 int rv = 0; 3232 unsigned long n = simple_strtoul(buf, &e, 10); 3233 3234 if (!*buf || (*e && *e != '\n')) 3235 return -EINVAL; 3236 3237 if (mddev->pers) 3238 rv = update_raid_disks(mddev, n); 3239 else if (mddev->reshape_position != MaxSector) { 3240 int olddisks = mddev->raid_disks - mddev->delta_disks; 3241 mddev->delta_disks = n - olddisks; 3242 mddev->raid_disks = n; 3243 } else 3244 mddev->raid_disks = n; 3245 return rv ? rv : len; 3246} 3247static struct md_sysfs_entry md_raid_disks = 3248__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 3249 3250static ssize_t 3251chunk_size_show(mddev_t *mddev, char *page) 3252{ 3253 if (mddev->reshape_position != MaxSector && 3254 mddev->chunk_sectors != mddev->new_chunk_sectors) 3255 return sprintf(page, "%d (%d)\n", 3256 mddev->new_chunk_sectors << 9, 3257 mddev->chunk_sectors << 9); 3258 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); 3259} 3260 3261static ssize_t 3262chunk_size_store(mddev_t *mddev, const char *buf, size_t len) 3263{ 3264 char *e; 3265 unsigned long n = simple_strtoul(buf, &e, 10); 3266 3267 if (!*buf || (*e && *e != '\n')) 3268 return -EINVAL; 3269 3270 if (mddev->pers) { 3271 int err; 3272 if (mddev->pers->check_reshape == NULL) 3273 return -EBUSY; 3274 mddev->new_chunk_sectors = n >> 9; 3275 err = mddev->pers->check_reshape(mddev); 3276 if (err) { 3277 mddev->new_chunk_sectors = mddev->chunk_sectors; 3278 return err; 3279 } 3280 } else { 3281 mddev->new_chunk_sectors = n >> 9; 3282 if (mddev->reshape_position == MaxSector) 3283 mddev->chunk_sectors = n >> 9; 3284 } 3285 return len; 3286} 3287static struct md_sysfs_entry md_chunk_size = 3288__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 3289 3290static ssize_t 3291resync_start_show(mddev_t *mddev, char *page) 3292{ 3293 if (mddev->recovery_cp == MaxSector) 3294 return sprintf(page, "none\n"); 3295 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 3296} 3297 3298static ssize_t 3299resync_start_store(mddev_t *mddev, const char *buf, size_t len) 3300{ 3301 char *e; 3302 unsigned long long n = simple_strtoull(buf, &e, 10); 3303 3304 if (mddev->pers) 3305 return -EBUSY; 3306 if (cmd_match(buf, "none")) 3307 n = MaxSector; 3308 else if (!*buf || (*e && *e != '\n')) 3309 return -EINVAL; 3310 3311 mddev->recovery_cp = n; 3312 return len; 3313} 3314static struct md_sysfs_entry md_resync_start = 3315__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store); 3316 3317/* 3318 * The array state can be: 3319 * 3320 * clear 3321 * No devices, no size, no level 3322 * Equivalent to STOP_ARRAY ioctl 3323 * inactive 3324 * May have some settings, but array is not active 3325 * all IO results in error 3326 * When written, doesn't tear down array, but just stops it 3327 * suspended (not supported yet) 3328 * All IO requests will block. The array can be reconfigured. 3329 * Writing this, if accepted, will block until array is quiescent 3330 * readonly 3331 * no resync can happen. no superblocks get written. 3332 * write requests fail 3333 * read-auto 3334 * like readonly, but behaves like 'clean' on a write request. 3335 * 3336 * clean - no pending writes, but otherwise active. 3337 * When written to inactive array, starts without resync 3338 * If a write request arrives then 3339 * if metadata is known, mark 'dirty' and switch to 'active'. 3340 * if not known, block and switch to write-pending 3341 * If written to an active array that has pending writes, then fails. 
3342 * active 3343 * fully active: IO and resync can be happening. 3344 * When written to inactive array, starts with resync 3345 * 3346 * write-pending 3347 * clean, but writes are blocked waiting for 'active' to be written. 3348 * 3349 * active-idle 3350 * like active, but no writes have been seen for a while (100msec). 3351 * 3352 */ 3353enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 3354 write_pending, active_idle, bad_word}; 3355static char *array_states[] = { 3356 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 3357 "write-pending", "active-idle", NULL }; 3358 3359static int match_word(const char *word, char **list) 3360{ 3361 int n; 3362 for (n=0; list[n]; n++) 3363 if (cmd_match(word, list[n])) 3364 break; 3365 return n; 3366} 3367 3368static ssize_t 3369array_state_show(mddev_t *mddev, char *page) 3370{ 3371 enum array_state st = inactive; 3372 3373 if (mddev->pers) 3374 switch(mddev->ro) { 3375 case 1: 3376 st = readonly; 3377 break; 3378 case 2: 3379 st = read_auto; 3380 break; 3381 case 0: 3382 if (mddev->in_sync) 3383 st = clean; 3384 else if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) 3385 st = write_pending; 3386 else if (mddev->safemode) 3387 st = active_idle; 3388 else 3389 st = active; 3390 } 3391 else { 3392 if (list_empty(&mddev->disks) && 3393 mddev->raid_disks == 0 && 3394 mddev->dev_sectors == 0) 3395 st = clear; 3396 else 3397 st = inactive; 3398 } 3399 return sprintf(page, "%s\n", array_states[st]); 3400} 3401 3402static int do_md_stop(mddev_t * mddev, int ro, int is_open); 3403static int md_set_readonly(mddev_t * mddev, int is_open); 3404static int do_md_run(mddev_t * mddev); 3405static int restart_array(mddev_t *mddev); 3406 3407static ssize_t 3408array_state_store(mddev_t *mddev, const char *buf, size_t len) 3409{ 3410 int err = -EINVAL; 3411 enum array_state st = match_word(buf, array_states); 3412 switch(st) { 3413 case bad_word: 3414 break; 3415 case clear: 3416 /* stopping an active array */ 3417 if (atomic_read(&mddev->openers) > 0) 3418 return -EBUSY; 3419 err = do_md_stop(mddev, 0, 0); 3420 break; 3421 case inactive: 3422 /* stopping an active array */ 3423 if (mddev->pers) { 3424 if (atomic_read(&mddev->openers) > 0) 3425 return -EBUSY; 3426 err = do_md_stop(mddev, 2, 0); 3427 } else 3428 err = 0; /* already inactive */ 3429 break; 3430 case suspended: 3431 break; /* not supported yet */ 3432 case readonly: 3433 if (mddev->pers) 3434 err = md_set_readonly(mddev, 0); 3435 else { 3436 mddev->ro = 1; 3437 set_disk_ro(mddev->gendisk, 1); 3438 err = do_md_run(mddev); 3439 } 3440 break; 3441 case read_auto: 3442 if (mddev->pers) { 3443 if (mddev->ro == 0) 3444 err = md_set_readonly(mddev, 0); 3445 else if (mddev->ro == 1) 3446 err = restart_array(mddev); 3447 if (err == 0) { 3448 mddev->ro = 2; 3449 set_disk_ro(mddev->gendisk, 0); 3450 } 3451 } else { 3452 mddev->ro = 2; 3453 err = do_md_run(mddev); 3454 } 3455 break; 3456 case clean: 3457 if (mddev->pers) { 3458 restart_array(mddev); 3459 spin_lock_irq(&mddev->write_lock); 3460 if (atomic_read(&mddev->writes_pending) == 0) { 3461 if (mddev->in_sync == 0) { 3462 mddev->in_sync = 1; 3463 if (mddev->safemode == 1) 3464 mddev->safemode = 0; 3465 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 3466 } 3467 err = 0; 3468 } else 3469 err = -EBUSY; 3470 spin_unlock_irq(&mddev->write_lock); 3471 } else 3472 err = -EINVAL; 3473 break; 3474 case active: 3475 if (mddev->pers) { 3476 restart_array(mddev); 3477 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 3478 
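/* superblock changes no longer pending; let writers waiting on sb_wait (e.g. in md_write_start()) continue */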
wake_up(&mddev->sb_wait); 3479 err = 0; 3480 } else { 3481 mddev->ro = 0; 3482 set_disk_ro(mddev->gendisk, 0); 3483 err = do_md_run(mddev); 3484 } 3485 break; 3486 case write_pending: 3487 case active_idle: 3488 /* these cannot be set */ 3489 break; 3490 } 3491 if (err) 3492 return err; 3493 else { 3494 sysfs_notify_dirent_safe(mddev->sysfs_state); 3495 return len; 3496 } 3497} 3498static struct md_sysfs_entry md_array_state = 3499__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 3500 3501static ssize_t 3502max_corrected_read_errors_show(mddev_t *mddev, char *page) { 3503 return sprintf(page, "%d\n", 3504 atomic_read(&mddev->max_corr_read_errors)); 3505} 3506 3507static ssize_t 3508max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len) 3509{ 3510 char *e; 3511 unsigned long n = simple_strtoul(buf, &e, 10); 3512 3513 if (*buf && (*e == 0 || *e == '\n')) { 3514 atomic_set(&mddev->max_corr_read_errors, n); 3515 return len; 3516 } 3517 return -EINVAL; 3518} 3519 3520static struct md_sysfs_entry max_corr_read_errors = 3521__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, 3522 max_corrected_read_errors_store); 3523 3524static ssize_t 3525null_show(mddev_t *mddev, char *page) 3526{ 3527 return -EINVAL; 3528} 3529 3530static ssize_t 3531new_dev_store(mddev_t *mddev, const char *buf, size_t len) 3532{ 3533 /* buf must be %d:%d\n? giving major and minor numbers */ 3534 /* The new device is added to the array. 3535 * If the array has a persistent superblock, we read the 3536 * superblock to initialise info and check validity. 3537 * Otherwise, only checking done is that in bind_rdev_to_array, 3538 * which mainly checks size. 3539 */ 3540 char *e; 3541 int major = simple_strtoul(buf, &e, 10); 3542 int minor; 3543 dev_t dev; 3544 mdk_rdev_t *rdev; 3545 int err; 3546 3547 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 3548 return -EINVAL; 3549 minor = simple_strtoul(e+1, &e, 10); 3550 if (*e && *e != '\n') 3551 return -EINVAL; 3552 dev = MKDEV(major, minor); 3553 if (major != MAJOR(dev) || 3554 minor != MINOR(dev)) 3555 return -EOVERFLOW; 3556 3557 3558 if (mddev->persistent) { 3559 rdev = md_import_device(dev, mddev->major_version, 3560 mddev->minor_version); 3561 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 3562 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 3563 mdk_rdev_t, same_set); 3564 err = super_types[mddev->major_version] 3565 .load_super(rdev, rdev0, mddev->minor_version); 3566 if (err < 0) 3567 goto out; 3568 } 3569 } else if (mddev->external) 3570 rdev = md_import_device(dev, -2, -1); 3571 else 3572 rdev = md_import_device(dev, -1, -1); 3573 3574 if (IS_ERR(rdev)) 3575 return PTR_ERR(rdev); 3576 err = bind_rdev_to_array(rdev, mddev); 3577 out: 3578 if (err) 3579 export_rdev(rdev); 3580 return err ? err : len; 3581} 3582 3583static struct md_sysfs_entry md_new_device = 3584__ATTR(new_dev, S_IWUSR, null_show, new_dev_store); 3585 3586static ssize_t 3587bitmap_store(mddev_t *mddev, const char *buf, size_t len) 3588{ 3589 char *end; 3590 unsigned long chunk, end_chunk; 3591 3592 if (!mddev->bitmap) 3593 goto out; 3594 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... 
(range) */ 3595 while (*buf) { 3596 chunk = end_chunk = simple_strtoul(buf, &end, 0); 3597 if (buf == end) break; 3598 if (*end == '-') { /* range */ 3599 buf = end + 1; 3600 end_chunk = simple_strtoul(buf, &end, 0); 3601 if (buf == end) break; 3602 } 3603 if (*end && !isspace(*end)) break; 3604 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 3605 buf = skip_spaces(end); 3606 } 3607 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 3608out: 3609 return len; 3610} 3611 3612static struct md_sysfs_entry md_bitmap = 3613__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 3614 3615static ssize_t 3616size_show(mddev_t *mddev, char *page) 3617{ 3618 return sprintf(page, "%llu\n", 3619 (unsigned long long)mddev->dev_sectors / 2); 3620} 3621 3622static int update_size(mddev_t *mddev, sector_t num_sectors); 3623 3624static ssize_t 3625size_store(mddev_t *mddev, const char *buf, size_t len) 3626{ 3627 /* If array is inactive, we can reduce the component size, but 3628 * not increase it (except from 0). 3629 * If array is active, we can try an on-line resize 3630 */ 3631 sector_t sectors; 3632 int err = strict_blocks_to_sectors(buf, &sectors); 3633 3634 if (err < 0) 3635 return err; 3636 if (mddev->pers) { 3637 err = update_size(mddev, sectors); 3638 md_update_sb(mddev, 1); 3639 } else { 3640 if (mddev->dev_sectors == 0 || 3641 mddev->dev_sectors > sectors) 3642 mddev->dev_sectors = sectors; 3643 else 3644 err = -ENOSPC; 3645 } 3646 return err ? err : len; 3647} 3648 3649static struct md_sysfs_entry md_size = 3650__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); 3651 3652 3653/* Metadata version. 3654 * This is one of 3655 * 'none' for arrays with no metadata (good luck...) 3656 * 'external' for arrays with externally managed metadata, 3657 * or N.M for internally known formats 3658 */ 3659static ssize_t 3660metadata_show(mddev_t *mddev, char *page) 3661{ 3662 if (mddev->persistent) 3663 return sprintf(page, "%d.%d\n", 3664 mddev->major_version, mddev->minor_version); 3665 else if (mddev->external) 3666 return sprintf(page, "external:%s\n", mddev->metadata_type); 3667 else 3668 return sprintf(page, "none\n"); 3669} 3670 3671static ssize_t 3672metadata_store(mddev_t *mddev, const char *buf, size_t len) 3673{ 3674 int major, minor; 3675 char *e; 3676 /* Changing the details of 'external' metadata is 3677 * always permitted. Otherwise there must be 3678 * no devices attached to the array.
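 * Illustrative values (assumed, not exhaustive): "1.2" selects internal
 * v1.2 superblocks, "external:imsm" defers to a userspace metadata
 * manager, and "none" uses no persistent superblocks at all.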
3679 */ 3680 if (mddev->external && strncmp(buf, "external:", 9) == 0) 3681 ; 3682 else if (!list_empty(&mddev->disks)) 3683 return -EBUSY; 3684 3685 if (cmd_match(buf, "none")) { 3686 mddev->persistent = 0; 3687 mddev->external = 0; 3688 mddev->major_version = 0; 3689 mddev->minor_version = 90; 3690 return len; 3691 } 3692 if (strncmp(buf, "external:", 9) == 0) { 3693 size_t namelen = len-9; 3694 if (namelen >= sizeof(mddev->metadata_type)) 3695 namelen = sizeof(mddev->metadata_type)-1; 3696 strncpy(mddev->metadata_type, buf+9, namelen); 3697 mddev->metadata_type[namelen] = 0; 3698 if (namelen && mddev->metadata_type[namelen-1] == '\n') 3699 mddev->metadata_type[--namelen] = 0; 3700 mddev->persistent = 0; 3701 mddev->external = 1; 3702 mddev->major_version = 0; 3703 mddev->minor_version = 90; 3704 return len; 3705 } 3706 major = simple_strtoul(buf, &e, 10); 3707 if (e==buf || *e != '.') 3708 return -EINVAL; 3709 buf = e+1; 3710 minor = simple_strtoul(buf, &e, 10); 3711 if (e==buf || (*e && *e != '\n') ) 3712 return -EINVAL; 3713 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) 3714 return -ENOENT; 3715 mddev->major_version = major; 3716 mddev->minor_version = minor; 3717 mddev->persistent = 1; 3718 mddev->external = 0; 3719 return len; 3720} 3721 3722static struct md_sysfs_entry md_metadata = 3723__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 3724 3725static ssize_t 3726action_show(mddev_t *mddev, char *page) 3727{ 3728 char *type = "idle"; 3729 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 3730 type = "frozen"; 3731 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3732 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { 3733 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 3734 type = "reshape"; 3735 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 3736 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 3737 type = "resync"; 3738 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 3739 type = "check"; 3740 else 3741 type = "repair"; 3742 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) 3743 type = "recover"; 3744 } 3745 return sprintf(page, "%s\n", type); 3746} 3747 3748static ssize_t 3749action_store(mddev_t *mddev, const char *page, size_t len) 3750{ 3751 if (!mddev->pers || !mddev->pers->sync_request) 3752 return -EINVAL; 3753 3754 if (cmd_match(page, "frozen")) 3755 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 3756 else 3757 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 3758 3759 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 3760 if (mddev->sync_thread) { 3761 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 3762 md_unregister_thread(mddev->sync_thread); 3763 mddev->sync_thread = NULL; 3764 mddev->recovery = 0; 3765 } 3766 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3767 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 3768 return -EBUSY; 3769 else if (cmd_match(page, "resync")) 3770 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3771 else if (cmd_match(page, "recover")) { 3772 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 3773 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3774 } else if (cmd_match(page, "reshape")) { 3775 int err; 3776 if (mddev->pers->start_reshape == NULL) 3777 return -EINVAL; 3778 err = mddev->pers->start_reshape(mddev); 3779 if (err) 3780 return err; 3781 sysfs_notify(&mddev->kobj, NULL, "degraded"); 3782 } else { 3783 if (cmd_match(page, "check")) 3784 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3785 else if 
(!cmd_match(page, "repair")) 3786 return -EINVAL; 3787 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 3788 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3789 } 3790 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3791 md_wakeup_thread(mddev->thread); 3792 sysfs_notify_dirent_safe(mddev->sysfs_action); 3793 return len; 3794} 3795 3796static ssize_t 3797mismatch_cnt_show(mddev_t *mddev, char *page) 3798{ 3799 return sprintf(page, "%llu\n", 3800 (unsigned long long) mddev->resync_mismatches); 3801} 3802 3803static struct md_sysfs_entry md_scan_mode = 3804__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 3805 3806 3807static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 3808 3809static ssize_t 3810sync_min_show(mddev_t *mddev, char *page) 3811{ 3812 return sprintf(page, "%d (%s)\n", speed_min(mddev), 3813 mddev->sync_speed_min ? "local": "system"); 3814} 3815 3816static ssize_t 3817sync_min_store(mddev_t *mddev, const char *buf, size_t len) 3818{ 3819 int min; 3820 char *e; 3821 if (strncmp(buf, "system", 6)==0) { 3822 mddev->sync_speed_min = 0; 3823 return len; 3824 } 3825 min = simple_strtoul(buf, &e, 10); 3826 if (buf == e || (*e && *e != '\n') || min <= 0) 3827 return -EINVAL; 3828 mddev->sync_speed_min = min; 3829 return len; 3830} 3831 3832static struct md_sysfs_entry md_sync_min = 3833__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 3834 3835static ssize_t 3836sync_max_show(mddev_t *mddev, char *page) 3837{ 3838 return sprintf(page, "%d (%s)\n", speed_max(mddev), 3839 mddev->sync_speed_max ? "local": "system"); 3840} 3841 3842static ssize_t 3843sync_max_store(mddev_t *mddev, const char *buf, size_t len) 3844{ 3845 int max; 3846 char *e; 3847 if (strncmp(buf, "system", 6)==0) { 3848 mddev->sync_speed_max = 0; 3849 return len; 3850 } 3851 max = simple_strtoul(buf, &e, 10); 3852 if (buf == e || (*e && *e != '\n') || max <= 0) 3853 return -EINVAL; 3854 mddev->sync_speed_max = max; 3855 return len; 3856} 3857 3858static struct md_sysfs_entry md_sync_max = 3859__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 3860 3861static ssize_t 3862degraded_show(mddev_t *mddev, char *page) 3863{ 3864 return sprintf(page, "%d\n", mddev->degraded); 3865} 3866static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 3867 3868static ssize_t 3869sync_force_parallel_show(mddev_t *mddev, char *page) 3870{ 3871 return sprintf(page, "%d\n", mddev->parallel_resync); 3872} 3873 3874static ssize_t 3875sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len) 3876{ 3877 long n; 3878 3879 if (strict_strtol(buf, 10, &n)) 3880 return -EINVAL; 3881 3882 if (n != 0 && n != 1) 3883 return -EINVAL; 3884 3885 mddev->parallel_resync = n; 3886 3887 if (mddev->sync_thread) 3888 wake_up(&resync_wait); 3889 3890 return len; 3891} 3892 3893/* force parallel resync, even with shared block devices */ 3894static struct md_sysfs_entry md_sync_force_parallel = 3895__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 3896 sync_force_parallel_show, sync_force_parallel_store); 3897 3898static ssize_t 3899sync_speed_show(mddev_t *mddev, char *page) 3900{ 3901 unsigned long resync, dt, db; 3902 if (mddev->curr_resync == 0) 3903 return sprintf(page, "none\n"); 3904 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 3905 dt = (jiffies - mddev->resync_mark) / HZ; 3906 if (!dt) dt++; 3907 db = resync - mddev->resync_mark_cnt; 3908 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 3909} 3910 3911static struct md_sysfs_entry md_sync_speed 
= __ATTR_RO(sync_speed); 3912 3913static ssize_t 3914sync_completed_show(mddev_t *mddev, char *page) 3915{ 3916 unsigned long max_sectors, resync; 3917 3918 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3919 return sprintf(page, "none\n"); 3920 3921 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 3922 max_sectors = mddev->resync_max_sectors; 3923 else 3924 max_sectors = mddev->dev_sectors; 3925 3926 resync = mddev->curr_resync_completed; 3927 return sprintf(page, "%lu / %lu\n", resync, max_sectors); 3928} 3929 3930static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); 3931 3932static ssize_t 3933min_sync_show(mddev_t *mddev, char *page) 3934{ 3935 return sprintf(page, "%llu\n", 3936 (unsigned long long)mddev->resync_min); 3937} 3938static ssize_t 3939min_sync_store(mddev_t *mddev, const char *buf, size_t len) 3940{ 3941 unsigned long long min; 3942 if (strict_strtoull(buf, 10, &min)) 3943 return -EINVAL; 3944 if (min > mddev->resync_max) 3945 return -EINVAL; 3946 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3947 return -EBUSY; 3948 3949 /* Must be a multiple of chunk_size */ 3950 if (mddev->chunk_sectors) { 3951 sector_t temp = min; 3952 if (sector_div(temp, mddev->chunk_sectors)) 3953 return -EINVAL; 3954 } 3955 mddev->resync_min = min; 3956 3957 return len; 3958} 3959 3960static struct md_sysfs_entry md_min_sync = 3961__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 3962 3963static ssize_t 3964max_sync_show(mddev_t *mddev, char *page) 3965{ 3966 if (mddev->resync_max == MaxSector) 3967 return sprintf(page, "max\n"); 3968 else 3969 return sprintf(page, "%llu\n", 3970 (unsigned long long)mddev->resync_max); 3971} 3972static ssize_t 3973max_sync_store(mddev_t *mddev, const char *buf, size_t len) 3974{ 3975 if (strncmp(buf, "max", 3) == 0) 3976 mddev->resync_max = MaxSector; 3977 else { 3978 unsigned long long max; 3979 if (strict_strtoull(buf, 10, &max)) 3980 return -EINVAL; 3981 if (max < mddev->resync_min) 3982 return -EINVAL; 3983 if (max < mddev->resync_max && 3984 mddev->ro == 0 && 3985 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3986 return -EBUSY; 3987 3988 /* Must be a multiple of chunk_size */ 3989 if (mddev->chunk_sectors) { 3990 sector_t temp = max; 3991 if (sector_div(temp, mddev->chunk_sectors)) 3992 return -EINVAL; 3993 } 3994 mddev->resync_max = max; 3995 } 3996 wake_up(&mddev->recovery_wait); 3997 return len; 3998} 3999 4000static struct md_sysfs_entry md_max_sync = 4001__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 4002 4003static ssize_t 4004suspend_lo_show(mddev_t *mddev, char *page) 4005{ 4006 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 4007} 4008 4009static ssize_t 4010suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) 4011{ 4012 char *e; 4013 unsigned long long new = simple_strtoull(buf, &e, 10); 4014 4015 if (mddev->pers == NULL || 4016 mddev->pers->quiesce == NULL) 4017 return -EINVAL; 4018 if (buf == e || (*e && *e != '\n')) 4019 return -EINVAL; 4020 if (new >= mddev->suspend_hi || 4021 (new > mddev->suspend_lo && new < mddev->suspend_hi)) { 4022 mddev->suspend_lo = new; 4023 mddev->pers->quiesce(mddev, 2); 4024 return len; 4025 } else 4026 return -EINVAL; 4027} 4028static struct md_sysfs_entry md_suspend_lo = 4029__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 4030 4031 4032static ssize_t 4033suspend_hi_show(mddev_t *mddev, char *page) 4034{ 4035 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); 4036} 
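/*
 * suspend_lo and suspend_hi together describe a half-open range of sectors
 * [suspend_lo, suspend_hi) in which the personality must not start new
 * requests (see ->quiesce and the make_request wrapper).  As an
 * illustrative sketch of how userspace might drive this pair (the device
 * name md0 and the sector counts below are made up, not taken from this
 * file):
 *
 *   echo 8192 > /sys/block/md0/md/suspend_hi   # suspend [0, 8192)
 *   echo 4096 > /sys/block/md0/md/suspend_lo   # narrow to [4096, 8192)
 *   ... region is quiet; work on the underlying devices ...
 *   echo 8192 > /sys/block/md0/md/suspend_lo   # empty range: resume
 *
 * The store methods below only accept changes that keep the range
 * well-formed, which is why suspend_hi is raised before suspend_lo in the
 * sketch above.
 */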
4037 4038static ssize_t 4039suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) 4040{ 4041 char *e; 4042 unsigned long long new = simple_strtoull(buf, &e, 10); 4043 4044 if (mddev->pers == NULL || 4045 mddev->pers->quiesce == NULL) 4046 return -EINVAL; 4047 if (buf == e || (*e && *e != '\n')) 4048 return -EINVAL; 4049 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) || 4050 (new > mddev->suspend_lo && new > mddev->suspend_hi)) { 4051 mddev->suspend_hi = new; 4052 mddev->pers->quiesce(mddev, 1); 4053 mddev->pers->quiesce(mddev, 0); 4054 return len; 4055 } else 4056 return -EINVAL; 4057} 4058static struct md_sysfs_entry md_suspend_hi = 4059__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 4060 4061static ssize_t 4062reshape_position_show(mddev_t *mddev, char *page) 4063{ 4064 if (mddev->reshape_position != MaxSector) 4065 return sprintf(page, "%llu\n", 4066 (unsigned long long)mddev->reshape_position); 4067 strcpy(page, "none\n"); 4068 return 5; 4069} 4070 4071static ssize_t 4072reshape_position_store(mddev_t *mddev, const char *buf, size_t len) 4073{ 4074 char *e; 4075 unsigned long long new = simple_strtoull(buf, &e, 10); 4076 if (mddev->pers) 4077 return -EBUSY; 4078 if (buf == e || (*e && *e != '\n')) 4079 return -EINVAL; 4080 mddev->reshape_position = new; 4081 mddev->delta_disks = 0; 4082 mddev->new_level = mddev->level; 4083 mddev->new_layout = mddev->layout; 4084 mddev->new_chunk_sectors = mddev->chunk_sectors; 4085 return len; 4086} 4087 4088static struct md_sysfs_entry md_reshape_position = 4089__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, 4090 reshape_position_store); 4091 4092static ssize_t 4093array_size_show(mddev_t *mddev, char *page) 4094{ 4095 if (mddev->external_size) 4096 return sprintf(page, "%llu\n", 4097 (unsigned long long)mddev->array_sectors/2); 4098 else 4099 return sprintf(page, "default\n"); 4100} 4101 4102static ssize_t 4103array_size_store(mddev_t *mddev, const char *buf, size_t len) 4104{ 4105 sector_t sectors; 4106 4107 if (strncmp(buf, "default", 7) == 0) { 4108 if (mddev->pers) 4109 sectors = mddev->pers->size(mddev, 0, 0); 4110 else 4111 sectors = mddev->array_sectors; 4112 4113 mddev->external_size = 0; 4114 } else { 4115 if (strict_blocks_to_sectors(buf, &sectors) < 0) 4116 return -EINVAL; 4117 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 4118 return -E2BIG; 4119 4120 mddev->external_size = 1; 4121 } 4122 4123 mddev->array_sectors = sectors; 4124 set_capacity(mddev->gendisk, mddev->array_sectors); 4125 if (mddev->pers) 4126 revalidate_disk(mddev->gendisk); 4127 4128 return len; 4129} 4130 4131static struct md_sysfs_entry md_array_size = 4132__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, 4133 array_size_store); 4134 4135static struct attribute *md_default_attrs[] = { 4136 &md_level.attr, 4137 &md_layout.attr, 4138 &md_raid_disks.attr, 4139 &md_chunk_size.attr, 4140 &md_size.attr, 4141 &md_resync_start.attr, 4142 &md_metadata.attr, 4143 &md_new_device.attr, 4144 &md_safe_delay.attr, 4145 &md_array_state.attr, 4146 &md_reshape_position.attr, 4147 &md_array_size.attr, 4148 &max_corr_read_errors.attr, 4149 NULL, 4150}; 4151 4152static struct attribute *md_redundancy_attrs[] = { 4153 &md_scan_mode.attr, 4154 &md_mismatches.attr, 4155 &md_sync_min.attr, 4156 &md_sync_max.attr, 4157 &md_sync_speed.attr, 4158 &md_sync_force_parallel.attr, 4159 &md_sync_completed.attr, 4160 &md_min_sync.attr, 4161 &md_max_sync.attr, 4162 &md_suspend_lo.attr, 4163 &md_suspend_hi.attr, 4164
&md_bitmap.attr, 4165 &md_degraded.attr, 4166 NULL, 4167}; 4168static struct attribute_group md_redundancy_group = { 4169 .name = NULL, 4170 .attrs = md_redundancy_attrs, 4171}; 4172 4173 4174static ssize_t 4175md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 4176{ 4177 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4178 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 4179 ssize_t rv; 4180 4181 if (!entry->show) 4182 return -EIO; 4183 rv = mddev_lock(mddev); 4184 if (!rv) { 4185 rv = entry->show(mddev, page); 4186 mddev_unlock(mddev); 4187 } 4188 return rv; 4189} 4190 4191static ssize_t 4192md_attr_store(struct kobject *kobj, struct attribute *attr, 4193 const char *page, size_t length) 4194{ 4195 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4196 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 4197 ssize_t rv; 4198 4199 if (!entry->store) 4200 return -EIO; 4201 if (!capable(CAP_SYS_ADMIN)) 4202 return -EACCES; 4203 rv = mddev_lock(mddev); 4204 if (mddev->hold_active == UNTIL_IOCTL) 4205 mddev->hold_active = 0; 4206 if (!rv) { 4207 rv = entry->store(mddev, page, length); 4208 mddev_unlock(mddev); 4209 } 4210 return rv; 4211} 4212 4213static void md_free(struct kobject *ko) 4214{ 4215 mddev_t *mddev = container_of(ko, mddev_t, kobj); 4216 4217 if (mddev->sysfs_state) 4218 sysfs_put(mddev->sysfs_state); 4219 4220 if (mddev->gendisk) { 4221 del_gendisk(mddev->gendisk); 4222 put_disk(mddev->gendisk); 4223 } 4224 if (mddev->queue) 4225 blk_cleanup_queue(mddev->queue); 4226 4227 kfree(mddev); 4228} 4229 4230static const struct sysfs_ops md_sysfs_ops = { 4231 .show = md_attr_show, 4232 .store = md_attr_store, 4233}; 4234static struct kobj_type md_ktype = { 4235 .release = md_free, 4236 .sysfs_ops = &md_sysfs_ops, 4237 .default_attrs = md_default_attrs, 4238}; 4239 4240int mdp_major = 0; 4241 4242static void mddev_delayed_delete(struct work_struct *ws) 4243{ 4244 mddev_t *mddev = container_of(ws, mddev_t, del_work); 4245 4246 sysfs_remove_group(&mddev->kobj, &md_bitmap_group); 4247 kobject_del(&mddev->kobj); 4248 kobject_put(&mddev->kobj); 4249} 4250 4251static int md_alloc(dev_t dev, char *name) 4252{ 4253 static DEFINE_MUTEX(disks_mutex); 4254 mddev_t *mddev = mddev_find(dev); 4255 struct gendisk *disk; 4256 int partitioned; 4257 int shift; 4258 int unit; 4259 int error; 4260 4261 if (!mddev) 4262 return -ENODEV; 4263 4264 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 4265 shift = partitioned ? MdpMinorShift : 0; 4266 unit = MINOR(mddev->unit) >> shift; 4267 4268 /* wait for any previous instance of this device 4269 * to be completely removed (mddev_delayed_delete). 4270 */ 4271 flush_scheduled_work(); 4272 4273 mutex_lock(&disks_mutex); 4274 error = -EEXIST; 4275 if (mddev->gendisk) 4276 goto abort; 4277 4278 if (name) { 4279 /* Need to ensure that 'name' is not a duplicate.
4280 */ 4281 mddev_t *mddev2; 4282 spin_lock(&all_mddevs_lock); 4283 4284 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 4285 if (mddev2->gendisk && 4286 strcmp(mddev2->gendisk->disk_name, name) == 0) { 4287 spin_unlock(&all_mddevs_lock); 4288 goto abort; 4289 } 4290 spin_unlock(&all_mddevs_lock); 4291 } 4292 4293 error = -ENOMEM; 4294 mddev->queue = blk_alloc_queue(GFP_KERNEL); 4295 if (!mddev->queue) 4296 goto abort; 4297 mddev->queue->queuedata = mddev; 4298 4299 blk_queue_make_request(mddev->queue, md_make_request); 4300 4301 disk = alloc_disk(1 << shift); 4302 if (!disk) { 4303 blk_cleanup_queue(mddev->queue); 4304 mddev->queue = NULL; 4305 goto abort; 4306 } 4307 disk->major = MAJOR(mddev->unit); 4308 disk->first_minor = unit << shift; 4309 if (name) 4310 strcpy(disk->disk_name, name); 4311 else if (partitioned) 4312 sprintf(disk->disk_name, "md_d%d", unit); 4313 else 4314 sprintf(disk->disk_name, "md%d", unit); 4315 disk->fops = &md_fops; 4316 disk->private_data = mddev; 4317 disk->queue = mddev->queue; 4318 /* Allow extended partitions. This makes the 4319 * 'mdp' device redundant, but we can't really 4320 * remove it now. 4321 */ 4322 disk->flags |= GENHD_FL_EXT_DEVT; 4323 add_disk(disk); 4324 mddev->gendisk = disk; 4325 error = kobject_init_and_add(&mddev->kobj, &md_ktype, 4326 &disk_to_dev(disk)->kobj, "%s", "md"); 4327 if (error) { 4328 /* This isn't possible, but as kobject_init_and_add is marked 4329 * __must_check, we must do something with the result 4330 */ 4331 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 4332 disk->disk_name); 4333 error = 0; 4334 } 4335 if (mddev->kobj.sd && 4336 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) 4337 printk(KERN_DEBUG "pointless warning\n"); 4338 abort: 4339 mutex_unlock(&disks_mutex); 4340 if (!error && mddev->kobj.sd) { 4341 kobject_uevent(&mddev->kobj, KOBJ_ADD); 4342 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 4343 } 4344 mddev_put(mddev); 4345 return error; 4346} 4347 4348static struct kobject *md_probe(dev_t dev, int *part, void *data) 4349{ 4350 md_alloc(dev, NULL); 4351 return NULL; 4352} 4353 4354static int add_named_array(const char *val, struct kernel_param *kp) 4355{ 4356 /* val must be "md_*" where * is not all digits. 4357 * We allocate an array with a large free minor number, and 4358 * set the name to val. val must not already be an active name. 4359 */ 4360 int len = strlen(val); 4361 char buf[DISK_NAME_LEN]; 4362 4363 while (len && val[len-1] == '\n') 4364 len--; 4365 if (len >= DISK_NAME_LEN) 4366 return -E2BIG; 4367 strlcpy(buf, val, len+1); 4368 if (strncmp(buf, "md_", 3) != 0) 4369 return -EINVAL; 4370 return md_alloc(0, buf); 4371} 4372 4373static void md_safemode_timeout(unsigned long data) 4374{ 4375 mddev_t *mddev = (mddev_t *) data; 4376 4377 if (!atomic_read(&mddev->writes_pending)) { 4378 mddev->safemode = 1; 4379 if (mddev->external) 4380 sysfs_notify_dirent_safe(mddev->sysfs_state); 4381 } 4382 md_wakeup_thread(mddev->thread); 4383} 4384 4385static int start_dirty_degraded; 4386 4387int md_run(mddev_t *mddev) 4388{ 4389 int err; 4390 mdk_rdev_t *rdev; 4391 struct mdk_personality *pers; 4392 4393 if (list_empty(&mddev->disks)) 4394 /* cannot run an array with no devices.. 
*/ 4395 return -EINVAL; 4396 4397 if (mddev->pers) 4398 return -EBUSY; 4399 /* Cannot run until previous stop completes properly */ 4400 if (mddev->sysfs_active) 4401 return -EBUSY; 4402 4403 /* 4404 * Analyze all RAID superblock(s) 4405 */ 4406 if (!mddev->raid_disks) { 4407 if (!mddev->persistent) 4408 return -EINVAL; 4409 analyze_sbs(mddev); 4410 } 4411 4412 if (mddev->level != LEVEL_NONE) 4413 request_module("md-level-%d", mddev->level); 4414 else if (mddev->clevel[0]) 4415 request_module("md-%s", mddev->clevel); 4416 4417 /* 4418 * Drop all container device buffers, from now on 4419 * the only valid external interface is through the md 4420 * device. 4421 */ 4422 list_for_each_entry(rdev, &mddev->disks, same_set) { 4423 if (test_bit(Faulty, &rdev->flags)) 4424 continue; 4425 sync_blockdev(rdev->bdev); 4426 invalidate_bdev(rdev->bdev); 4427 4428 /* perform some consistency tests on the device. 4429 * We don't want the data to overlap the metadata; 4430 * internal bitmap issues have been handled elsewhere. 4431 */ 4432 if (rdev->data_offset < rdev->sb_start) { 4433 if (mddev->dev_sectors && 4434 rdev->data_offset + mddev->dev_sectors 4435 > rdev->sb_start) { 4436 printk("md: %s: data overlaps metadata\n", 4437 mdname(mddev)); 4438 return -EINVAL; 4439 } 4440 } else { 4441 if (rdev->sb_start + rdev->sb_size/512 4442 > rdev->data_offset) { 4443 printk("md: %s: metadata overlaps data\n", 4444 mdname(mddev)); 4445 return -EINVAL; 4446 } 4447 } 4448 sysfs_notify_dirent_safe(rdev->sysfs_state); 4449 } 4450 4451 spin_lock(&pers_lock); 4452 pers = find_pers(mddev->level, mddev->clevel); 4453 if (!pers || !try_module_get(pers->owner)) { 4454 spin_unlock(&pers_lock); 4455 if (mddev->level != LEVEL_NONE) 4456 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 4457 mddev->level); 4458 else 4459 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 4460 mddev->clevel); 4461 return -EINVAL; 4462 } 4463 mddev->pers = pers; 4464 spin_unlock(&pers_lock); 4465 if (mddev->level != pers->level) { 4466 mddev->level = pers->level; 4467 mddev->new_level = pers->level; 4468 } 4469 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 4470 4471 if (mddev->reshape_position != MaxSector && 4472 pers->start_reshape == NULL) { 4473 /* This personality cannot handle reshaping... */ 4474 mddev->pers = NULL; 4475 module_put(pers->owner); 4476 return -EINVAL; 4477 } 4478 4479 if (pers->sync_request) { 4480 /* Warn if this is a potentially silly 4481 * configuration.
4482 */ 4483 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 4484 mdk_rdev_t *rdev2; 4485 int warned = 0; 4486 4487 list_for_each_entry(rdev, &mddev->disks, same_set) 4488 list_for_each_entry(rdev2, &mddev->disks, same_set) { 4489 if (rdev < rdev2 && 4490 rdev->bdev->bd_contains == 4491 rdev2->bdev->bd_contains) { 4492 printk(KERN_WARNING 4493 "%s: WARNING: %s appears to be" 4494 " on the same physical disk as" 4495 " %s.\n", 4496 mdname(mddev), 4497 bdevname(rdev->bdev,b), 4498 bdevname(rdev2->bdev,b2)); 4499 warned = 1; 4500 } 4501 } 4502 4503 if (warned) 4504 printk(KERN_WARNING 4505 "True protection against single-disk" 4506 " failure might be compromised.\n"); 4507 } 4508 4509 mddev->recovery = 0; 4510 /* may be over-ridden by personality */ 4511 mddev->resync_max_sectors = mddev->dev_sectors; 4512 4513 mddev->barriers_work = 1; 4514 mddev->ok_start_degraded = start_dirty_degraded; 4515 4516 if (start_readonly && mddev->ro == 0) 4517 mddev->ro = 2; /* read-only, but switch on first write */ 4518 4519 err = mddev->pers->run(mddev); 4520 if (err) 4521 printk(KERN_ERR "md: pers->run() failed ...\n"); 4522 else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) { 4523 WARN_ONCE(!mddev->external_size, "%s: default size too small," 4524 " but 'external_size' not in effect?\n", __func__); 4525 printk(KERN_ERR 4526 "md: invalid array_size %llu > default size %llu\n", 4527 (unsigned long long)mddev->array_sectors / 2, 4528 (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2); 4529 err = -EINVAL; 4530 mddev->pers->stop(mddev); 4531 } 4532 if (err == 0 && mddev->pers->sync_request) { 4533 err = bitmap_create(mddev); 4534 if (err) { 4535 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 4536 mdname(mddev), err); 4537 mddev->pers->stop(mddev); 4538 } 4539 } 4540 if (err) { 4541 module_put(mddev->pers->owner); 4542 mddev->pers = NULL; 4543 bitmap_destroy(mddev); 4544 return err; 4545 } 4546 if (mddev->pers->sync_request) { 4547 if (mddev->kobj.sd && 4548 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 4549 printk(KERN_WARNING 4550 "md: cannot register extra attributes for %s\n", 4551 mdname(mddev)); 4552 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 4553 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 4554 mddev->ro = 0; 4555 4556 atomic_set(&mddev->writes_pending,0); 4557 atomic_set(&mddev->max_corr_read_errors, 4558 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); 4559 mddev->safemode = 0; 4560 mddev->safemode_timer.function = md_safemode_timeout; 4561 mddev->safemode_timer.data = (unsigned long) mddev; 4562 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 4563 mddev->in_sync = 1; 4564 smp_wmb(); 4565 mddev->ready = 1; 4566 list_for_each_entry(rdev, &mddev->disks, same_set) 4567 if (rdev->raid_disk >= 0) { 4568 char nm[20]; 4569 sprintf(nm, "rd%d", rdev->raid_disk); 4570 if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) 4571 /* failure here is OK */; 4572 } 4573 4574 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4575 4576 if (mddev->flags) 4577 md_update_sb(mddev, 0); 4578 4579 md_wakeup_thread(mddev->thread); 4580 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 4581 4582 md_new_event(mddev); 4583 sysfs_notify_dirent_safe(mddev->sysfs_state); 4584 sysfs_notify_dirent_safe(mddev->sysfs_action); 4585 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4586 return 0; 4587} 4588EXPORT_SYMBOL_GPL(md_run); 4589 4590static int do_md_run(mddev_t *mddev) 4591{ 4592 int err; 4593 4594 err = md_run(mddev); 4595 if 
(err) 4596 goto out; 4597 err = bitmap_load(mddev); 4598 if (err) { 4599 bitmap_destroy(mddev); 4600 goto out; 4601 } 4602 set_capacity(mddev->gendisk, mddev->array_sectors); 4603 revalidate_disk(mddev->gendisk); 4604 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4605out: 4606 return err; 4607} 4608 4609static int restart_array(mddev_t *mddev) 4610{ 4611 struct gendisk *disk = mddev->gendisk; 4612 4613 /* Complain if it has no devices */ 4614 if (list_empty(&mddev->disks)) 4615 return -ENXIO; 4616 if (!mddev->pers) 4617 return -EINVAL; 4618 if (!mddev->ro) 4619 return -EBUSY; 4620 mddev->safemode = 0; 4621 mddev->ro = 0; 4622 set_disk_ro(disk, 0); 4623 printk(KERN_INFO "md: %s switched to read-write mode.\n", 4624 mdname(mddev)); 4625 /* Kick recovery or resync if necessary */ 4626 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4627 md_wakeup_thread(mddev->thread); 4628 md_wakeup_thread(mddev->sync_thread); 4629 sysfs_notify_dirent_safe(mddev->sysfs_state); 4630 return 0; 4631} 4632 4633/* similar to deny_write_access, but accounts for our holding a reference 4634 * to the file ourselves */ 4635static int deny_bitmap_write_access(struct file * file) 4636{ 4637 struct inode *inode = file->f_mapping->host; 4638 4639 spin_lock(&inode->i_lock); 4640 if (atomic_read(&inode->i_writecount) > 1) { 4641 spin_unlock(&inode->i_lock); 4642 return -ETXTBSY; 4643 } 4644 atomic_set(&inode->i_writecount, -1); 4645 spin_unlock(&inode->i_lock); 4646 4647 return 0; 4648} 4649 4650void restore_bitmap_write_access(struct file *file) 4651{ 4652 struct inode *inode = file->f_mapping->host; 4653 4654 spin_lock(&inode->i_lock); 4655 atomic_set(&inode->i_writecount, 1); 4656 spin_unlock(&inode->i_lock); 4657} 4658 4659static void md_clean(mddev_t *mddev) 4660{ 4661 mddev->array_sectors = 0; 4662 mddev->external_size = 0; 4663 mddev->dev_sectors = 0; 4664 mddev->raid_disks = 0; 4665 mddev->recovery_cp = 0; 4666 mddev->resync_min = 0; 4667 mddev->resync_max = MaxSector; 4668 mddev->reshape_position = MaxSector; 4669 mddev->external = 0; 4670 mddev->persistent = 0; 4671 mddev->level = LEVEL_NONE; 4672 mddev->clevel[0] = 0; 4673 mddev->flags = 0; 4674 mddev->ro = 0; 4675 mddev->metadata_type[0] = 0; 4676 mddev->chunk_sectors = 0; 4677 mddev->ctime = mddev->utime = 0; 4678 mddev->layout = 0; 4679 mddev->max_disks = 0; 4680 mddev->events = 0; 4681 mddev->can_decrease_events = 0; 4682 mddev->delta_disks = 0; 4683 mddev->new_level = LEVEL_NONE; 4684 mddev->new_layout = 0; 4685 mddev->new_chunk_sectors = 0; 4686 mddev->curr_resync = 0; 4687 mddev->resync_mismatches = 0; 4688 mddev->suspend_lo = mddev->suspend_hi = 0; 4689 mddev->sync_speed_min = mddev->sync_speed_max = 0; 4690 mddev->recovery = 0; 4691 mddev->in_sync = 0; 4692 mddev->degraded = 0; 4693 mddev->barriers_work = 0; 4694 mddev->safemode = 0; 4695 mddev->bitmap_info.offset = 0; 4696 mddev->bitmap_info.default_offset = 0; 4697 mddev->bitmap_info.chunksize = 0; 4698 mddev->bitmap_info.daemon_sleep = 0; 4699 mddev->bitmap_info.max_write_behind = 0; 4700 mddev->plug = NULL; 4701} 4702 4703void md_stop_writes(mddev_t *mddev) 4704{ 4705 if (mddev->sync_thread) { 4706 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4707 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4708 md_unregister_thread(mddev->sync_thread); 4709 mddev->sync_thread = NULL; 4710 } 4711 4712 del_timer_sync(&mddev->safemode_timer); 4713 4714 bitmap_flush(mddev); 4715 md_super_wait(mddev); 4716 4717 if (!mddev->in_sync || mddev->flags) { 4718 /* mark array as shutdown cleanly */ 4719 
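/* At this point the sync thread has been reaped and the
 * bitmap_flush()/md_super_wait() calls above have drained the
 * outstanding bitmap and superblock writes, so it should be safe
 * to set the clean bit and push one final superblock update; a
 * later assembly can then skip the full resync.
 */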
mddev->in_sync = 1; 4720 md_update_sb(mddev, 1); 4721 } 4722} 4723EXPORT_SYMBOL_GPL(md_stop_writes); 4724 4725void md_stop(mddev_t *mddev) 4726{ 4727 mddev->ready = 0; 4728 mddev->pers->stop(mddev); 4729 if (mddev->pers->sync_request && mddev->to_remove == NULL) 4730 mddev->to_remove = &md_redundancy_group; 4731 module_put(mddev->pers->owner); 4732 mddev->pers = NULL; 4733 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4734} 4735EXPORT_SYMBOL_GPL(md_stop); 4736 4737static int md_set_readonly(mddev_t *mddev, int is_open) 4738{ 4739 int err = 0; 4740 mutex_lock(&mddev->open_mutex); 4741 if (atomic_read(&mddev->openers) > is_open) { 4742 printk("md: %s still in use.\n",mdname(mddev)); 4743 err = -EBUSY; 4744 goto out; 4745 } 4746 if (mddev->pers) { 4747 md_stop_writes(mddev); 4748 4749 err = -ENXIO; 4750 if (mddev->ro==1) 4751 goto out; 4752 mddev->ro = 1; 4753 set_disk_ro(mddev->gendisk, 1); 4754 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4755 sysfs_notify_dirent_safe(mddev->sysfs_state); 4756 err = 0; 4757 } 4758out: 4759 mutex_unlock(&mddev->open_mutex); 4760 return err; 4761} 4762 4763/* mode: 4764 * 0 - completely stop and dis-assemble array 4765 * 2 - stop but do not disassemble array 4766 */ 4767static int do_md_stop(mddev_t * mddev, int mode, int is_open) 4768{ 4769 struct gendisk *disk = mddev->gendisk; 4770 mdk_rdev_t *rdev; 4771 4772 mutex_lock(&mddev->open_mutex); 4773 if (atomic_read(&mddev->openers) > is_open || 4774 mddev->sysfs_active) { 4775 printk("md: %s still in use.\n",mdname(mddev)); 4776 mutex_unlock(&mddev->open_mutex); 4777 return -EBUSY; 4778 } 4779 4780 if (mddev->pers) { 4781 if (mddev->ro) 4782 set_disk_ro(disk, 0); 4783 4784 md_stop_writes(mddev); 4785 md_stop(mddev); 4786 mddev->queue->merge_bvec_fn = NULL; 4787 mddev->queue->unplug_fn = NULL; 4788 mddev->queue->backing_dev_info.congested_fn = NULL; 4789 4790 /* tell userspace to handle 'inactive' */ 4791 sysfs_notify_dirent_safe(mddev->sysfs_state); 4792 4793 list_for_each_entry(rdev, &mddev->disks, same_set) 4794 if (rdev->raid_disk >= 0) { 4795 char nm[20]; 4796 sprintf(nm, "rd%d", rdev->raid_disk); 4797 sysfs_remove_link(&mddev->kobj, nm); 4798 } 4799 4800 set_capacity(disk, 0); 4801 mutex_unlock(&mddev->open_mutex); 4802 revalidate_disk(disk); 4803 4804 if (mddev->ro) 4805 mddev->ro = 0; 4806 } else 4807 mutex_unlock(&mddev->open_mutex); 4808 /* 4809 * Free resources if final stop 4810 */ 4811 if (mode == 0) { 4812 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 4813 4814 bitmap_destroy(mddev); 4815 if (mddev->bitmap_info.file) { 4816 restore_bitmap_write_access(mddev->bitmap_info.file); 4817 fput(mddev->bitmap_info.file); 4818 mddev->bitmap_info.file = NULL; 4819 } 4820 mddev->bitmap_info.offset = 0; 4821 4822 export_array(mddev); 4823 4824 md_clean(mddev); 4825 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4826 if (mddev->hold_active == UNTIL_STOP) 4827 mddev->hold_active = 0; 4828 } 4829 blk_integrity_unregister(disk); 4830 md_new_event(mddev); 4831 sysfs_notify_dirent_safe(mddev->sysfs_state); 4832 return 0; 4833} 4834 4835#ifndef MODULE 4836static void autorun_array(mddev_t *mddev) 4837{ 4838 mdk_rdev_t *rdev; 4839 int err; 4840 4841 if (list_empty(&mddev->disks)) 4842 return; 4843 4844 printk(KERN_INFO "md: running: "); 4845 4846 list_for_each_entry(rdev, &mddev->disks, same_set) { 4847 char b[BDEVNAME_SIZE]; 4848 printk("<%s>", bdevname(rdev->bdev,b)); 4849 } 4850 printk("\n"); 4851 4852 err = do_md_run(mddev); 4853 if (err) { 4854 printk(KERN_WARNING "md: do_md_run() 
returned %d\n", err); 4855 do_md_stop(mddev, 0, 0); 4856 } 4857} 4858 4859/* 4860 * let's try to run arrays based on all disks that have arrived 4861 * until now. (those are in pending_raid_disks) 4862 * 4863 * the method: pick the first pending disk, collect all disks with 4864 * the same UUID, remove all from the pending list and put them into 4865 * the 'same_array' list. Then order this list based on superblock 4866 * update time (freshest comes first), kick out 'old' disks and 4867 * compare superblocks. If everything's fine then run it. 4868 * 4869 * If "unit" is allocated, then bump its reference count 4870 */ 4871static void autorun_devices(int part) 4872{ 4873 mdk_rdev_t *rdev0, *rdev, *tmp; 4874 mddev_t *mddev; 4875 char b[BDEVNAME_SIZE]; 4876 4877 printk(KERN_INFO "md: autorun ...\n"); 4878 while (!list_empty(&pending_raid_disks)) { 4879 int unit; 4880 dev_t dev; 4881 LIST_HEAD(candidates); 4882 rdev0 = list_entry(pending_raid_disks.next, 4883 mdk_rdev_t, same_set); 4884 4885 printk(KERN_INFO "md: considering %s ...\n", 4886 bdevname(rdev0->bdev,b)); 4887 INIT_LIST_HEAD(&candidates); 4888 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 4889 if (super_90_load(rdev, rdev0, 0) >= 0) { 4890 printk(KERN_INFO "md: adding %s ...\n", 4891 bdevname(rdev->bdev,b)); 4892 list_move(&rdev->same_set, &candidates); 4893 } 4894 /* 4895 * now we have a set of devices, with all of them having 4896 * mostly sane superblocks. It's time to allocate the 4897 * mddev. 4898 */ 4899 if (part) { 4900 dev = MKDEV(mdp_major, 4901 rdev0->preferred_minor << MdpMinorShift); 4902 unit = MINOR(dev) >> MdpMinorShift; 4903 } else { 4904 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 4905 unit = MINOR(dev); 4906 } 4907 if (rdev0->preferred_minor != unit) { 4908 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 4909 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 4910 break; 4911 } 4912 4913 md_probe(dev, NULL, NULL); 4914 mddev = mddev_find(dev); 4915 if (!mddev || !mddev->gendisk) { 4916 if (mddev) 4917 mddev_put(mddev); 4918 printk(KERN_ERR 4919 "md: cannot allocate memory for md drive.\n"); 4920 break; 4921 } 4922 if (mddev_lock(mddev)) 4923 printk(KERN_WARNING "md: %s locked, cannot run\n", 4924 mdname(mddev)); 4925 else if (mddev->raid_disks || mddev->major_version 4926 || !list_empty(&mddev->disks)) { 4927 printk(KERN_WARNING 4928 "md: %s already running, cannot run %s\n", 4929 mdname(mddev), bdevname(rdev0->bdev,b)); 4930 mddev_unlock(mddev); 4931 } else { 4932 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 4933 mddev->persistent = 1; 4934 rdev_for_each_list(rdev, tmp, &candidates) { 4935 list_del_init(&rdev->same_set); 4936 if (bind_rdev_to_array(rdev, mddev)) 4937 export_rdev(rdev); 4938 } 4939 autorun_array(mddev); 4940 mddev_unlock(mddev); 4941 } 4942 /* on success, candidates will be empty, on error 4943 * it won't... 4944 */ 4945 rdev_for_each_list(rdev, tmp, &candidates) { 4946 list_del_init(&rdev->same_set); 4947 export_rdev(rdev); 4948 } 4949 mddev_put(mddev); 4950 } 4951 printk(KERN_INFO "md: ...
autorun DONE.\n"); 4952} 4953#endif /* !MODULE */ 4954 4955static int get_version(void __user * arg) 4956{ 4957 mdu_version_t ver; 4958 4959 ver.major = MD_MAJOR_VERSION; 4960 ver.minor = MD_MINOR_VERSION; 4961 ver.patchlevel = MD_PATCHLEVEL_VERSION; 4962 4963 if (copy_to_user(arg, &ver, sizeof(ver))) 4964 return -EFAULT; 4965 4966 return 0; 4967} 4968 4969static int get_array_info(mddev_t * mddev, void __user * arg) 4970{ 4971 mdu_array_info_t info; 4972 int nr,working,insync,failed,spare; 4973 mdk_rdev_t *rdev; 4974 4975 nr=working=insync=failed=spare=0; 4976 list_for_each_entry(rdev, &mddev->disks, same_set) { 4977 nr++; 4978 if (test_bit(Faulty, &rdev->flags)) 4979 failed++; 4980 else { 4981 working++; 4982 if (test_bit(In_sync, &rdev->flags)) 4983 insync++; 4984 else 4985 spare++; 4986 } 4987 } 4988 4989 info.major_version = mddev->major_version; 4990 info.minor_version = mddev->minor_version; 4991 info.patch_version = MD_PATCHLEVEL_VERSION; 4992 info.ctime = mddev->ctime; 4993 info.level = mddev->level; 4994 info.size = mddev->dev_sectors / 2; 4995 if (info.size != mddev->dev_sectors / 2) /* overflow */ 4996 info.size = -1; 4997 info.nr_disks = nr; 4998 info.raid_disks = mddev->raid_disks; 4999 info.md_minor = mddev->md_minor; 5000 info.not_persistent= !mddev->persistent; 5001 5002 info.utime = mddev->utime; 5003 info.state = 0; 5004 if (mddev->in_sync) 5005 info.state = (1<<MD_SB_CLEAN); 5006 if (mddev->bitmap && mddev->bitmap_info.offset) 5007 info.state |= (1<<MD_SB_BITMAP_PRESENT); /* don't clobber MD_SB_CLEAN */ 5008 info.active_disks = insync; 5009 info.working_disks = working; 5010 info.failed_disks = failed; 5011 info.spare_disks = spare; 5012 5013 info.layout = mddev->layout; 5014 info.chunk_size = mddev->chunk_sectors << 9; 5015 5016 if (copy_to_user(arg, &info, sizeof(info))) 5017 return -EFAULT; 5018 5019 return 0; 5020} 5021 5022static int get_bitmap_file(mddev_t * mddev, void __user * arg) 5023{ 5024 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 5025 char *ptr, *buf = NULL; 5026 int err = -ENOMEM; 5027 5028 if (md_allow_write(mddev)) 5029 file = kmalloc(sizeof(*file), GFP_NOIO); 5030 else 5031 file = kmalloc(sizeof(*file), GFP_KERNEL); 5032 5033 if (!file) 5034 goto out; 5035 5036 /* bitmap disabled, zero the first byte and copy out */ 5037 if (!mddev->bitmap || !mddev->bitmap->file) { 5038 file->pathname[0] = '\0'; 5039 goto copy_out; 5040 } 5041 5042 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL); 5043 if (!buf) 5044 goto out; 5045 5046 ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname)); 5047 if (IS_ERR(ptr)) 5048 goto out; 5049 5050 strcpy(file->pathname, ptr); 5051 5052copy_out: 5053 err = 0; 5054 if (copy_to_user(arg, file, sizeof(*file))) 5055 err = -EFAULT; 5056out: 5057 kfree(buf); 5058 kfree(file); 5059 return err; 5060} 5061 5062static int get_disk_info(mddev_t * mddev, void __user * arg) 5063{ 5064 mdu_disk_info_t info; 5065 mdk_rdev_t *rdev; 5066 5067 if (copy_from_user(&info, arg, sizeof(info))) 5068 return -EFAULT; 5069 5070 rdev = find_rdev_nr(mddev, info.number); 5071 if (rdev) { 5072 info.major = MAJOR(rdev->bdev->bd_dev); 5073 info.minor = MINOR(rdev->bdev->bd_dev); 5074 info.raid_disk = rdev->raid_disk; 5075 info.state = 0; 5076 if (test_bit(Faulty, &rdev->flags)) 5077 info.state |= (1<<MD_DISK_FAULTY); 5078 else if (test_bit(In_sync, &rdev->flags)) { 5079 info.state |= (1<<MD_DISK_ACTIVE); 5080 info.state |= (1<<MD_DISK_SYNC); 5081 } 5082 if (test_bit(WriteMostly, &rdev->flags)) 5083 info.state |= (1<<MD_DISK_WRITEMOSTLY); 5084 } else {
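/* No rdev with this number: report an empty/removed slot
 * (rather than an error) so that tools like mdadm can walk
 * the whole table with GET_DISK_INFO and tell vacant slots
 * apart from failed devices.
 */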
5085 info.major = info.minor = 0; 5086 info.raid_disk = -1; 5087 info.state = (1<<MD_DISK_REMOVED); 5088 } 5089 5090 if (copy_to_user(arg, &info, sizeof(info))) 5091 return -EFAULT; 5092 5093 return 0; 5094} 5095 5096static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) 5097{ 5098 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5099 mdk_rdev_t *rdev; 5100 dev_t dev = MKDEV(info->major,info->minor); 5101 5102 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 5103 return -EOVERFLOW; 5104 5105 if (!mddev->raid_disks) { 5106 int err; 5107 /* expecting a device which has a superblock */ 5108 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 5109 if (IS_ERR(rdev)) { 5110 printk(KERN_WARNING 5111 "md: md_import_device returned %ld\n", 5112 PTR_ERR(rdev)); 5113 return PTR_ERR(rdev); 5114 } 5115 if (!list_empty(&mddev->disks)) { 5116 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 5117 mdk_rdev_t, same_set); 5118 err = super_types[mddev->major_version] 5119 .load_super(rdev, rdev0, mddev->minor_version); 5120 if (err < 0) { 5121 printk(KERN_WARNING 5122 "md: %s has different UUID to %s\n", 5123 bdevname(rdev->bdev,b), 5124 bdevname(rdev0->bdev,b2)); 5125 export_rdev(rdev); 5126 return -EINVAL; 5127 } 5128 } 5129 err = bind_rdev_to_array(rdev, mddev); 5130 if (err) 5131 export_rdev(rdev); 5132 return err; 5133 } 5134 5135 /* 5136 * add_new_disk can be used once the array is assembled 5137 * to add "hot spares". They must already have a superblock 5138 * written 5139 */ 5140 if (mddev->pers) { 5141 int err; 5142 if (!mddev->pers->hot_add_disk) { 5143 printk(KERN_WARNING 5144 "%s: personality does not support diskops!\n", 5145 mdname(mddev)); 5146 return -EINVAL; 5147 } 5148 if (mddev->persistent) 5149 rdev = md_import_device(dev, mddev->major_version, 5150 mddev->minor_version); 5151 else 5152 rdev = md_import_device(dev, -1, -1); 5153 if (IS_ERR(rdev)) { 5154 printk(KERN_WARNING 5155 "md: md_import_device returned %ld\n", 5156 PTR_ERR(rdev)); 5157 return PTR_ERR(rdev); 5158 } 5159 /* set saved_raid_disk if appropriate */ 5160 if (!mddev->persistent) { 5161 if (info->state & (1<<MD_DISK_SYNC) && 5162 info->raid_disk < mddev->raid_disks) { 5163 rdev->raid_disk = info->raid_disk; 5164 set_bit(In_sync, &rdev->flags); 5165 } else 5166 rdev->raid_disk = -1; 5167 } else 5168 super_types[mddev->major_version]. 5169 validate_super(mddev, rdev); 5170 if (test_bit(In_sync, &rdev->flags)) 5171 rdev->saved_raid_disk = rdev->raid_disk; 5172 else 5173 rdev->saved_raid_disk = -1; 5174 5175 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 5176 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5177 set_bit(WriteMostly, &rdev->flags); 5178 else 5179 clear_bit(WriteMostly, &rdev->flags); 5180 5181 rdev->raid_disk = -1; 5182 err = bind_rdev_to_array(rdev, mddev); 5183 if (!err && !mddev->pers->hot_remove_disk) { 5184 /* If there is hot_add_disk but no hot_remove_disk 5185 * then added disks are for geometry changes, 5186 * and should be added immediately. 5187 */ 5188 super_types[mddev->major_version].
5189 validate_super(mddev, rdev); 5190 err = mddev->pers->hot_add_disk(mddev, rdev); 5191 if (err) 5192 unbind_rdev_from_array(rdev); 5193 } 5194 if (err) 5195 export_rdev(rdev); 5196 else 5197 sysfs_notify_dirent_safe(rdev->sysfs_state); 5198 5199 md_update_sb(mddev, 1); 5200 if (mddev->degraded) 5201 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5202 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5203 md_wakeup_thread(mddev->thread); 5204 return err; 5205 } 5206 5207 /* otherwise, add_new_disk is only allowed 5208 * for major_version==0 superblocks 5209 */ 5210 if (mddev->major_version != 0) { 5211 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 5212 mdname(mddev)); 5213 return -EINVAL; 5214 } 5215 5216 if (!(info->state & (1<<MD_DISK_FAULTY))) { 5217 int err; 5218 rdev = md_import_device(dev, -1, 0); 5219 if (IS_ERR(rdev)) { 5220 printk(KERN_WARNING 5221 "md: error, md_import_device() returned %ld\n", 5222 PTR_ERR(rdev)); 5223 return PTR_ERR(rdev); 5224 } 5225 rdev->desc_nr = info->number; 5226 if (info->raid_disk < mddev->raid_disks) 5227 rdev->raid_disk = info->raid_disk; 5228 else 5229 rdev->raid_disk = -1; 5230 5231 if (rdev->raid_disk < mddev->raid_disks) 5232 if (info->state & (1<<MD_DISK_SYNC)) 5233 set_bit(In_sync, &rdev->flags); 5234 5235 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5236 set_bit(WriteMostly, &rdev->flags); 5237 5238 if (!mddev->persistent) { 5239 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 5240 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 5241 } else 5242 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 5243 rdev->sectors = rdev->sb_start; 5244 5245 err = bind_rdev_to_array(rdev, mddev); 5246 if (err) { 5247 export_rdev(rdev); 5248 return err; 5249 } 5250 } 5251 5252 return 0; 5253} 5254 5255static int hot_remove_disk(mddev_t * mddev, dev_t dev) 5256{ 5257 char b[BDEVNAME_SIZE]; 5258 mdk_rdev_t *rdev; 5259 5260 rdev = find_rdev(mddev, dev); 5261 if (!rdev) 5262 return -ENXIO; 5263 5264 if (rdev->raid_disk >= 0) 5265 goto busy; 5266 5267 kick_rdev_from_array(rdev); 5268 md_update_sb(mddev, 1); 5269 md_new_event(mddev); 5270 5271 return 0; 5272busy: 5273 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", 5274 bdevname(rdev->bdev,b), mdname(mddev)); 5275 return -EBUSY; 5276} 5277 5278static int hot_add_disk(mddev_t * mddev, dev_t dev) 5279{ 5280 char b[BDEVNAME_SIZE]; 5281 int err; 5282 mdk_rdev_t *rdev; 5283 5284 if (!mddev->pers) 5285 return -ENODEV; 5286 5287 if (mddev->major_version != 0) { 5288 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 5289 " version-0 superblocks.\n", 5290 mdname(mddev)); 5291 return -EINVAL; 5292 } 5293 if (!mddev->pers->hot_add_disk) { 5294 printk(KERN_WARNING 5295 "%s: personality does not support diskops!\n", 5296 mdname(mddev)); 5297 return -EINVAL; 5298 } 5299 5300 rdev = md_import_device(dev, -1, 0); 5301 if (IS_ERR(rdev)) { 5302 printk(KERN_WARNING 5303 "md: error, md_import_device() returned %ld\n", 5304 PTR_ERR(rdev)); 5305 return -EINVAL; 5306 } 5307 5308 if (mddev->persistent) 5309 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 5310 else 5311 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 5312 5313 rdev->sectors = rdev->sb_start; 5314 5315 if (test_bit(Faulty, &rdev->flags)) { 5316 printk(KERN_WARNING 5317 "md: can not hot-add faulty %s disk to %s!\n", 5318 bdevname(rdev->bdev,b), mdname(mddev)); 5319 err = -EINVAL; 5320 goto abort_export; 5321 } 5322 clear_bit(In_sync, &rdev->flags); 5323 rdev->desc_nr = -1; 5324 rdev->saved_raid_disk = -1; 5325 err = 
bind_rdev_to_array(rdev, mddev); 5326 if (err) 5327 goto abort_export; 5328 5329 /* 5330 * The rest should better be atomic; we can have disk failures 5331 * noticed in interrupt contexts ... 5332 */ 5333 5334 rdev->raid_disk = -1; 5335 5336 md_update_sb(mddev, 1); 5337 5338 /* 5339 * Kick recovery, maybe this spare has to be added to the 5340 * array immediately. 5341 */ 5342 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5343 md_wakeup_thread(mddev->thread); 5344 md_new_event(mddev); 5345 return 0; 5346 5347abort_export: 5348 export_rdev(rdev); 5349 return err; 5350} 5351 5352static int set_bitmap_file(mddev_t *mddev, int fd) 5353{ 5354 int err; 5355 5356 if (mddev->pers) { 5357 if (!mddev->pers->quiesce) 5358 return -EBUSY; 5359 if (mddev->recovery || mddev->sync_thread) 5360 return -EBUSY; 5361 /* we should be able to change the bitmap.. */ 5362 } 5363 5364 5365 if (fd >= 0) { 5366 if (mddev->bitmap) 5367 return -EEXIST; /* cannot add when bitmap is present */ 5368 mddev->bitmap_info.file = fget(fd); 5369 5370 if (mddev->bitmap_info.file == NULL) { 5371 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 5372 mdname(mddev)); 5373 return -EBADF; 5374 } 5375 5376 err = deny_bitmap_write_access(mddev->bitmap_info.file); 5377 if (err) { 5378 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 5379 mdname(mddev)); 5380 fput(mddev->bitmap_info.file); 5381 mddev->bitmap_info.file = NULL; 5382 return err; 5383 } 5384 mddev->bitmap_info.offset = 0; /* file overrides offset */ 5385 } else if (mddev->bitmap == NULL) 5386 return -ENOENT; /* cannot remove what isn't there */ 5387 err = 0; 5388 if (mddev->pers) { 5389 mddev->pers->quiesce(mddev, 1); 5390 if (fd >= 0) { 5391 err = bitmap_create(mddev); 5392 if (!err) 5393 err = bitmap_load(mddev); 5394 } 5395 if (fd < 0 || err) { 5396 bitmap_destroy(mddev); 5397 fd = -1; /* make sure to put the file */ 5398 } 5399 mddev->pers->quiesce(mddev, 0); 5400 } 5401 if (fd < 0) { 5402 if (mddev->bitmap_info.file) { 5403 restore_bitmap_write_access(mddev->bitmap_info.file); 5404 fput(mddev->bitmap_info.file); 5405 } 5406 mddev->bitmap_info.file = NULL; 5407 } 5408 5409 return err; 5410} 5411 5412/* 5413 * set_array_info is used in two different ways. 5414 * The original usage is when creating a new array. 5415 * In this usage, raid_disks is > 0 and it together with 5416 * level, size, not_persistent, layout, chunksize determine the 5417 * shape of the array. 5418 * This will always create an array with a type-0.90.0 superblock. 5419 * The newer usage is when assembling an array. 5420 * In this case raid_disks will be 0, and the major_version field is 5421 * used to determine which style super-blocks are to be found on the devices. 5422 * The minor and patch _version numbers are also kept in case the 5423 * super_block handler wishes to interpret them. 5424 */ 5425static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) 5426{ 5427 5428 if (info->raid_disks == 0) { 5429 /* just setting version number for superblock loading */ 5430 if (info->major_version < 0 || 5431 info->major_version >= ARRAY_SIZE(super_types) || 5432 super_types[info->major_version].name == NULL) { 5433 /* maybe try to auto-load a module?
*/ 5434 printk(KERN_INFO 5435 "md: superblock version %d not known\n", 5436 info->major_version); 5437 return -EINVAL; 5438 } 5439 mddev->major_version = info->major_version; 5440 mddev->minor_version = info->minor_version; 5441 mddev->patch_version = info->patch_version; 5442 mddev->persistent = !info->not_persistent; 5443 /* ensure mddev_put doesn't delete this now that there 5444 * is some minimal configuration. 5445 */ 5446 mddev->ctime = get_seconds(); 5447 return 0; 5448 } 5449 mddev->major_version = MD_MAJOR_VERSION; 5450 mddev->minor_version = MD_MINOR_VERSION; 5451 mddev->patch_version = MD_PATCHLEVEL_VERSION; 5452 mddev->ctime = get_seconds(); 5453 5454 mddev->level = info->level; 5455 mddev->clevel[0] = 0; 5456 mddev->dev_sectors = 2 * (sector_t)info->size; 5457 mddev->raid_disks = info->raid_disks; 5458 /* don't set md_minor, it is determined by which /dev/md* was 5459 * opened 5460 */ 5461 if (info->state & (1<<MD_SB_CLEAN)) 5462 mddev->recovery_cp = MaxSector; 5463 else 5464 mddev->recovery_cp = 0; 5465 mddev->persistent = ! info->not_persistent; 5466 mddev->external = 0; 5467 5468 mddev->layout = info->layout; 5469 mddev->chunk_sectors = info->chunk_size >> 9; 5470 5471 mddev->max_disks = MD_SB_DISKS; 5472 5473 if (mddev->persistent) 5474 mddev->flags = 0; 5475 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5476 5477 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 5478 mddev->bitmap_info.offset = 0; 5479 5480 mddev->reshape_position = MaxSector; 5481 5482 /* 5483 * Generate a 128 bit UUID 5484 */ 5485 get_random_bytes(mddev->uuid, 16); 5486 5487 mddev->new_level = mddev->level; 5488 mddev->new_chunk_sectors = mddev->chunk_sectors; 5489 mddev->new_layout = mddev->layout; 5490 mddev->delta_disks = 0; 5491 5492 return 0; 5493} 5494 5495void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors) 5496{ 5497 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__); 5498 5499 if (mddev->external_size) 5500 return; 5501 5502 mddev->array_sectors = array_sectors; 5503} 5504EXPORT_SYMBOL(md_set_array_sectors); 5505 5506static int update_size(mddev_t *mddev, sector_t num_sectors) 5507{ 5508 mdk_rdev_t *rdev; 5509 int rv; 5510 int fit = (num_sectors == 0); 5511 5512 if (mddev->pers->resize == NULL) 5513 return -EINVAL; 5514 /* The "num_sectors" is the number of sectors of each device that 5515 * is used. This can only make sense for arrays with redundancy. 5516 * linear and raid0 always use whatever space is available. We can only 5517 * consider changing this number if no resync or reconstruction is 5518 * happening, and if the new size is acceptable. It must fit before the 5519 * sb_start or, if that is <data_offset, it must fit before the size 5520 * of each device. If num_sectors is zero, we find the largest size 5521 * that fits. 5522 5523 */ 5524 if (mddev->sync_thread) 5525 return -EBUSY; 5526 if (mddev->bitmap) 5527 /* Sorry, cannot grow a bitmap yet, just remove it, 5528 * grow, and re-add.
5529 */ 5530 return -EBUSY; 5531 list_for_each_entry(rdev, &mddev->disks, same_set) { 5532 sector_t avail = rdev->sectors; 5533 5534 if (fit && (num_sectors == 0 || num_sectors > avail)) 5535 num_sectors = avail; 5536 if (avail < num_sectors) 5537 return -ENOSPC; 5538 } 5539 rv = mddev->pers->resize(mddev, num_sectors); 5540 if (!rv) 5541 revalidate_disk(mddev->gendisk); 5542 return rv; 5543} 5544 5545static int update_raid_disks(mddev_t *mddev, int raid_disks) 5546{ 5547 int rv; 5548 /* change the number of raid disks */ 5549 if (mddev->pers->check_reshape == NULL) 5550 return -EINVAL; 5551 if (raid_disks <= 0 || 5552 (mddev->max_disks && raid_disks >= mddev->max_disks)) 5553 return -EINVAL; 5554 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 5555 return -EBUSY; 5556 mddev->delta_disks = raid_disks - mddev->raid_disks; 5557 5558 rv = mddev->pers->check_reshape(mddev); 5559 return rv; 5560} 5561 5562 5563/* 5564 * update_array_info is used to change the configuration of an 5565 * on-line array. 5566 * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size 5567 * fields in the info are checked against the array. 5568 * Any differences that cannot be handled will cause an error. 5569 * Normally, only one change can be managed at a time. 5570 */ 5571static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) 5572{ 5573 int rv = 0; 5574 int cnt = 0; 5575 int state = 0; 5576 5577 /* calculate expected state, ignoring low bits */ 5578 if (mddev->bitmap && mddev->bitmap_info.offset) 5579 state |= (1 << MD_SB_BITMAP_PRESENT); 5580 5581 if (mddev->major_version != info->major_version || 5582 mddev->minor_version != info->minor_version || 5583/* mddev->patch_version != info->patch_version || */ 5584 mddev->ctime != info->ctime || 5585 mddev->level != info->level || 5586/* mddev->layout != info->layout || */ 5587 !mddev->persistent != info->not_persistent || 5588 mddev->chunk_sectors != info->chunk_size >> 9 || 5589 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 5590 ((state^info->state) & 0xfffffe00) 5591 ) 5592 return -EINVAL; 5593 /* Check there is only one change */ 5594 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 5595 cnt++; 5596 if (mddev->raid_disks != info->raid_disks) 5597 cnt++; 5598 if (mddev->layout != info->layout) 5599 cnt++; 5600 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 5601 cnt++; 5602 if (cnt == 0) 5603 return 0; 5604 if (cnt > 1) 5605 return -EINVAL; 5606 5607 if (mddev->layout != info->layout) { 5608 /* Change layout 5609 * we don't need to do anything at the md level, the 5610 * personality will take care of it all.
5611 */ 5612 if (mddev->pers->check_reshape == NULL) 5613 return -EINVAL; 5614 else { 5615 mddev->new_layout = info->layout; 5616 rv = mddev->pers->check_reshape(mddev); 5617 if (rv) 5618 mddev->new_layout = mddev->layout; 5619 return rv; 5620 } 5621 } 5622 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 5623 rv = update_size(mddev, (sector_t)info->size * 2); 5624 5625 if (mddev->raid_disks != info->raid_disks) 5626 rv = update_raid_disks(mddev, info->raid_disks); 5627 5628 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 5629 if (mddev->pers->quiesce == NULL) 5630 return -EINVAL; 5631 if (mddev->recovery || mddev->sync_thread) 5632 return -EBUSY; 5633 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 5634 /* add the bitmap */ 5635 if (mddev->bitmap) 5636 return -EEXIST; 5637 if (mddev->bitmap_info.default_offset == 0) 5638 return -EINVAL; 5639 mddev->bitmap_info.offset = 5640 mddev->bitmap_info.default_offset; 5641 mddev->pers->quiesce(mddev, 1); 5642 rv = bitmap_create(mddev); 5643 if (!rv) 5644 rv = bitmap_load(mddev); 5645 if (rv) 5646 bitmap_destroy(mddev); 5647 mddev->pers->quiesce(mddev, 0); 5648 } else { 5649 /* remove the bitmap */ 5650 if (!mddev->bitmap) 5651 return -ENOENT; 5652 if (mddev->bitmap->file) 5653 return -EINVAL; 5654 mddev->pers->quiesce(mddev, 1); 5655 bitmap_destroy(mddev); 5656 mddev->pers->quiesce(mddev, 0); 5657 mddev->bitmap_info.offset = 0; 5658 } 5659 } 5660 md_update_sb(mddev, 1); 5661 return rv; 5662} 5663 5664static int set_disk_faulty(mddev_t *mddev, dev_t dev) 5665{ 5666 mdk_rdev_t *rdev; 5667 5668 if (mddev->pers == NULL) 5669 return -ENODEV; 5670 5671 rdev = find_rdev(mddev, dev); 5672 if (!rdev) 5673 return -ENODEV; 5674 5675 md_error(mddev, rdev); 5676 return 0; 5677} 5678 5679/* 5680 * We have a problem here: there is no easy way to give a CHS 5681 * virtual geometry. We currently pretend that we have 2 heads and 5682 * 4 sectors (with a BIG number of cylinders...). This drives 5683 * dosfs just mad...
;-) 5684 */ 5685static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 5686{ 5687 mddev_t *mddev = bdev->bd_disk->private_data; 5688 5689 geo->heads = 2; 5690 geo->sectors = 4; 5691 geo->cylinders = mddev->array_sectors / 8; 5692 return 0; 5693} 5694 5695static int md_ioctl(struct block_device *bdev, fmode_t mode, 5696 unsigned int cmd, unsigned long arg) 5697{ 5698 int err = 0; 5699 void __user *argp = (void __user *)arg; 5700 mddev_t *mddev = NULL; 5701 int ro; 5702 5703 if (!capable(CAP_SYS_ADMIN)) 5704 return -EACCES; 5705 5706 /* 5707 * Commands dealing with the RAID driver but not any 5708 * particular array: 5709 */ 5710 switch (cmd) 5711 { 5712 case RAID_VERSION: 5713 err = get_version(argp); 5714 goto done; 5715 5716 case PRINT_RAID_DEBUG: 5717 err = 0; 5718 md_print_devices(); 5719 goto done; 5720 5721#ifndef MODULE 5722 case RAID_AUTORUN: 5723 err = 0; 5724 autostart_arrays(arg); 5725 goto done; 5726#endif 5727 default:; 5728 } 5729 5730 /* 5731 * Commands creating/starting a new array: 5732 */ 5733 5734 mddev = bdev->bd_disk->private_data; 5735 5736 if (!mddev) { 5737 BUG(); 5738 goto abort; 5739 } 5740 5741 err = mddev_lock(mddev); 5742 if (err) { 5743 printk(KERN_INFO 5744 "md: ioctl lock interrupted, reason %d, cmd %d\n", 5745 err, cmd); 5746 goto abort; 5747 } 5748 5749 switch (cmd) 5750 { 5751 case SET_ARRAY_INFO: 5752 { 5753 mdu_array_info_t info; 5754 if (!arg) 5755 memset(&info, 0, sizeof(info)); 5756 else if (copy_from_user(&info, argp, sizeof(info))) { 5757 err = -EFAULT; 5758 goto abort_unlock; 5759 } 5760 if (mddev->pers) { 5761 err = update_array_info(mddev, &info); 5762 if (err) { 5763 printk(KERN_WARNING "md: couldn't update" 5764 " array info. %d\n", err); 5765 goto abort_unlock; 5766 } 5767 goto done_unlock; 5768 } 5769 if (!list_empty(&mddev->disks)) { 5770 printk(KERN_WARNING 5771 "md: array %s already has disks!\n", 5772 mdname(mddev)); 5773 err = -EBUSY; 5774 goto abort_unlock; 5775 } 5776 if (mddev->raid_disks) { 5777 printk(KERN_WARNING 5778 "md: array %s already initialised!\n", 5779 mdname(mddev)); 5780 err = -EBUSY; 5781 goto abort_unlock; 5782 } 5783 err = set_array_info(mddev, &info); 5784 if (err) { 5785 printk(KERN_WARNING "md: couldn't set" 5786 " array info. 
%d\n", err); 5787 goto abort_unlock; 5788 } 5789 } 5790 goto done_unlock; 5791 5792 default:; 5793 } 5794 5795 /* 5796 * Commands querying/configuring an existing array: 5797 */ 5798 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 5799 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 5800 if ((!mddev->raid_disks && !mddev->external) 5801 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 5802 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 5803 && cmd != GET_BITMAP_FILE) { 5804 err = -ENODEV; 5805 goto abort_unlock; 5806 } 5807 5808 /* 5809 * Commands even a read-only array can execute: 5810 */ 5811 switch (cmd) 5812 { 5813 case GET_ARRAY_INFO: 5814 err = get_array_info(mddev, argp); 5815 goto done_unlock; 5816 5817 case GET_BITMAP_FILE: 5818 err = get_bitmap_file(mddev, argp); 5819 goto done_unlock; 5820 5821 case GET_DISK_INFO: 5822 err = get_disk_info(mddev, argp); 5823 goto done_unlock; 5824 5825 case RESTART_ARRAY_RW: 5826 err = restart_array(mddev); 5827 goto done_unlock; 5828 5829 case STOP_ARRAY: 5830 err = do_md_stop(mddev, 0, 1); 5831 goto done_unlock; 5832 5833 case STOP_ARRAY_RO: 5834 err = md_set_readonly(mddev, 1); 5835 goto done_unlock; 5836 5837 case BLKROSET: 5838 if (get_user(ro, (int __user *)(arg))) { 5839 err = -EFAULT; 5840 goto done_unlock; 5841 } 5842 err = -EINVAL; 5843 5844 /* if the bdev is going readonly the value of mddev->ro 5845 * does not matter, no writes are coming 5846 */ 5847 if (ro) 5848 goto done_unlock; 5849 5850 /* are we already prepared for writes? */ 5851 if (mddev->ro != 1) 5852 goto done_unlock; 5853 5854 /* transitioning to readauto need only happen for 5855 * arrays that call md_write_start 5856 */ 5857 if (mddev->pers) { 5858 err = restart_array(mddev); 5859 if (err == 0) { 5860 mddev->ro = 2; 5861 set_disk_ro(mddev->gendisk, 0); 5862 } 5863 } 5864 goto done_unlock; 5865 } 5866 5867 /* 5868 * The remaining ioctls are changing the state of the 5869 * superblock, so we do not allow them on read-only arrays. 5870 * However non-MD ioctls (e.g. get-size) will still come through 5871 * here and hit the 'default' below, so only disallow 5872 * 'md' ioctls, and switch to rw mode if started auto-readonly.
5873 */ 5874 if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) { 5875 if (mddev->ro == 2) { 5876 mddev->ro = 0; 5877 sysfs_notify_dirent_safe(mddev->sysfs_state); 5878 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5879 md_wakeup_thread(mddev->thread); 5880 } else { 5881 err = -EROFS; 5882 goto abort_unlock; 5883 } 5884 } 5885 5886 switch (cmd) 5887 { 5888 case ADD_NEW_DISK: 5889 { 5890 mdu_disk_info_t info; 5891 if (copy_from_user(&info, argp, sizeof(info))) 5892 err = -EFAULT; 5893 else 5894 err = add_new_disk(mddev, &info); 5895 goto done_unlock; 5896 } 5897 5898 case HOT_REMOVE_DISK: 5899 err = hot_remove_disk(mddev, new_decode_dev(arg)); 5900 goto done_unlock; 5901 5902 case HOT_ADD_DISK: 5903 err = hot_add_disk(mddev, new_decode_dev(arg)); 5904 goto done_unlock; 5905 5906 case SET_DISK_FAULTY: 5907 err = set_disk_faulty(mddev, new_decode_dev(arg)); 5908 goto done_unlock; 5909 5910 case RUN_ARRAY: 5911 err = do_md_run(mddev); 5912 goto done_unlock; 5913 5914 case SET_BITMAP_FILE: 5915 err = set_bitmap_file(mddev, (int)arg); 5916 goto done_unlock; 5917 5918 default: 5919 err = -EINVAL; 5920 goto abort_unlock; 5921 } 5922 5923done_unlock: 5924abort_unlock: 5925 if (mddev->hold_active == UNTIL_IOCTL && 5926 err != -EINVAL) 5927 mddev->hold_active = 0; 5928 mddev_unlock(mddev); 5929 5930 return err; 5931done: 5932 if (err) 5933 MD_BUG(); 5934abort: 5935 return err; 5936} 5937#ifdef CONFIG_COMPAT 5938static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, 5939 unsigned int cmd, unsigned long arg) 5940{ 5941 switch (cmd) { 5942 case HOT_REMOVE_DISK: 5943 case HOT_ADD_DISK: 5944 case SET_DISK_FAULTY: 5945 case SET_BITMAP_FILE: 5946 /* These take in integer arg, do not convert */ 5947 break; 5948 default: 5949 arg = (unsigned long)compat_ptr(arg); 5950 break; 5951 } 5952 5953 return md_ioctl(bdev, mode, cmd, arg); 5954} 5955#endif /* CONFIG_COMPAT */ 5956 5957static int md_open(struct block_device *bdev, fmode_t mode) 5958{ 5959 /* 5960 * Succeed if we can lock the mddev, which confirms that 5961 * it isn't being stopped right now. 5962 */ 5963 mddev_t *mddev = mddev_find(bdev->bd_dev); 5964 int err; 5965 5966 lock_kernel(); 5967 if (mddev->gendisk != bdev->bd_disk) { 5968 /* we are racing with mddev_put which is discarding this 5969 * bd_disk. 
static int md_open(struct block_device *bdev, fmode_t mode)
{
	/*
	 * Succeed if we can lock the mddev, which confirms that
	 * it isn't being stopped right now.
	 */
	mddev_t *mddev = mddev_find(bdev->bd_dev);
	int err;

	lock_kernel();
	if (mddev->gendisk != bdev->bd_disk) {
		/* we are racing with mddev_put which is discarding this
		 * bd_disk.
		 */
		mddev_put(mddev);
		/* Wait until bdev->bd_disk is definitely gone */
		flush_scheduled_work();
		/* Then retry the open from the top */
		unlock_kernel();
		return -ERESTARTSYS;
	}
	BUG_ON(mddev != bdev->bd_disk->private_data);

	if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
		goto out;

	err = 0;
	atomic_inc(&mddev->openers);
	mutex_unlock(&mddev->open_mutex);

	check_disk_size_change(mddev->gendisk, bdev);
 out:
	unlock_kernel();
	return err;
}

static int md_release(struct gendisk *disk, fmode_t mode)
{
	mddev_t *mddev = disk->private_data;

	BUG_ON(!mddev);
	lock_kernel();
	atomic_dec(&mddev->openers);
	mddev_put(mddev);
	unlock_kernel();

	return 0;
}
static const struct block_device_operations md_fops =
{
	.owner		= THIS_MODULE,
	.open		= md_open,
	.release	= md_release,
	.ioctl		= md_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= md_compat_ioctl,
#endif
	.getgeo		= md_getgeo,
};

static int md_thread(void * arg)
{
	mdk_thread_t *thread = arg;

	/*
	 * md_thread is a 'system-thread', its priority should be very
	 * high. We avoid resource deadlocks individually in each
	 * raid personality. (RAID5 does preallocation) We also use RR and
	 * the very same RT priority as kswapd, thus we will never get
	 * into a priority inversion deadlock.
	 *
	 * we definitely have to have equal or higher priority than
	 * bdflush, otherwise bdflush will deadlock if there are too
	 * many dirty RAID5 blocks.
	 */

	allow_signal(SIGKILL);
	while (!kthread_should_stop()) {

		/* We need to wait INTERRUPTIBLE so that
		 * we don't add to the load-average.
		 * That means we need to be sure no signals are
		 * pending
		 */
		if (signal_pending(current))
			flush_signals(current);

		wait_event_interruptible_timeout
			(thread->wqueue,
			 test_bit(THREAD_WAKEUP, &thread->flags)
			 || kthread_should_stop(),
			 thread->timeout);

		clear_bit(THREAD_WAKEUP, &thread->flags);
		if (!kthread_should_stop())
			thread->run(thread->mddev);
	}

	return 0;
}

void md_wakeup_thread(mdk_thread_t *thread)
{
	if (thread) {
		dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
		set_bit(THREAD_WAKEUP, &thread->flags);
		wake_up(&thread->wqueue);
	}
}

mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
				 const char *name)
{
	mdk_thread_t *thread;

	thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
	if (!thread)
		return NULL;

	init_waitqueue_head(&thread->wqueue);

	thread->run = run;
	thread->mddev = mddev;
	thread->timeout = MAX_SCHEDULE_TIMEOUT;
	thread->tsk = kthread_run(md_thread, thread,
				  "%s_%s",
				  mdname(thread->mddev),
				  name ?: mddev->pers->name);
	if (IS_ERR(thread->tsk)) {
		kfree(thread);
		return NULL;
	}
	return thread;
}

void md_unregister_thread(mdk_thread_t *thread)
{
	if (!thread)
		return;
	dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));

	kthread_stop(thread->tsk);
	kfree(thread);
}

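/*
 * Illustrative sketch (hypothetical names, not from this file): a raid
 * personality typically wires the thread helpers above together like
 * this in its run()/stop() methods:
 *
 *	static void myraid_daemon(mddev_t *mddev)
 *	{
 *		... service queued IO, retry failed reads, etc ...
 *	}
 *
 *	conf->thread = md_register_thread(myraid_daemon, mddev, "myraid");
 *	if (!conf->thread)
 *		return -ENOMEM;
 *	...
 *	md_wakeup_thread(conf->thread);		(from IO or interrupt context)
 *	...
 *	md_unregister_thread(conf->thread);	(when the array stops)
 */
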
void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	if (!mddev) {
		MD_BUG();
		return;
	}

	if (!rdev || test_bit(Faulty, &rdev->flags))
		return;

	if (mddev->external)
		set_bit(Blocked, &rdev->flags);
/*
	dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
		mdname(mddev),
		MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
		__builtin_return_address(0),__builtin_return_address(1),
		__builtin_return_address(2),__builtin_return_address(3));
*/
	if (!mddev->pers)
		return;
	if (!mddev->pers->error_handler)
		return;
	mddev->pers->error_handler(mddev,rdev);
	if (mddev->degraded)
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	if (mddev->event_work.func)
		schedule_work(&mddev->event_work);
	md_new_event_inintr(mddev);
}

/* seq_file implementation /proc/mdstat */

static void status_unused(struct seq_file *seq)
{
	int i = 0;
	mdk_rdev_t *rdev;

	seq_printf(seq, "unused devices: ");

	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
		char b[BDEVNAME_SIZE];
		i++;
		seq_printf(seq, "%s ",
			   bdevname(rdev->bdev,b));
	}
	if (!i)
		seq_printf(seq, "<none>");

	seq_printf(seq, "\n");
}


static void status_resync(struct seq_file *seq, mddev_t * mddev)
{
	sector_t max_sectors, resync, res;
	unsigned long dt, db;
	sector_t rt;
	int scale;
	unsigned int per_milli;

	resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	/*
	 * Should not happen.
	 */
	if (!max_sectors) {
		MD_BUG();
		return;
	}
	/* Pick 'scale' such that (resync>>scale)*1000 will fit
	 * in a sector_t, and (max_sectors>>scale) will fit in a
	 * u32, as those are the requirements for sector_div.
	 * Thus 'scale' must be at least 10
	 */
	scale = 10;
	if (sizeof(sector_t) > sizeof(unsigned long)) {
		while ( max_sectors/2 > (1ULL<<(scale+32)))
			scale++;
	}
	res = (resync>>scale)*1000;
	sector_div(res, (u32)((max_sectors>>scale)+1));

	per_milli = res;
	{
		int i, x = per_milli/50, y = 20-x;
		seq_printf(seq, "[");
		for (i = 0; i < x; i++)
			seq_printf(seq, "=");
		seq_printf(seq, ">");
		for (i = 0; i < y; i++)
			seq_printf(seq, ".");
		seq_printf(seq, "] ");
	}
	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
		    "reshape" :
		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
		     "check" :
		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
		      "resync" : "recovery"))),
		   per_milli/10, per_milli % 10,
		   (unsigned long long) resync/2,
		   (unsigned long long) max_sectors/2);

	/*
	 * dt: time from mark until now
	 * db: blocks written from mark until now
	 * rt: remaining time
	 *
	 * rt is a sector_t, so could be 32bit or 64bit.
	 * So we divide before multiply in case it is 32bit and close
	 * to the limit.
	 * We scale the divisor (db) by 32 to avoid losing precision
	 * near the end of resync when the number of remaining sectors
	 * is close to 'db'.
	 * We then divide rt by 32 after multiplying by db to compensate.
	 * The '+1' avoids division by zero if db is very small.
	 */
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt) dt++;
	db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
		- mddev->resync_mark_cnt;

	rt = max_sectors - resync;    /* number of remaining sectors */
	sector_div(rt, db/32+1);
	rt *= dt;
	rt >>= 5;

	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
		   ((unsigned long)rt % 60)/6);

	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}

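/*
 * Worked example of the remaining-time computation above (illustrative
 * numbers, not from the source): suppose max_sectors = 1000000,
 * resync = 400000, dt = 10 seconds and db = 20000 sectors since the
 * mark.  Then
 *
 *	rt = 600000 remaining sectors
 *	rt / (db/32 + 1) = 600000 / 626 = 958
 *	958 * dt = 9580, >> 5  ->  299 seconds, printed as finish=4.9min
 *
 * which matches the straightforward 600000 / (20000/10) = 300 s, less
 * the small rounding introduced by the /32 scaling and the '+1'.
 */
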
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct list_head *tmp;
	loff_t l = *pos;
	mddev_t *mddev;

	if (l >= 0x10000)
		return NULL;
	if (!l--)
		/* header */
		return (void*)1;

	spin_lock(&all_mddevs_lock);
	list_for_each(tmp,&all_mddevs)
		if (!l--) {
			mddev = list_entry(tmp, mddev_t, all_mddevs);
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			return mddev;
		}
	spin_unlock(&all_mddevs_lock);
	if (!l--)
		return (void*)2;/* tail */
	return NULL;
}

static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *tmp;
	mddev_t *next_mddev, *mddev = v;

	++*pos;
	if (v == (void*)2)
		return NULL;

	spin_lock(&all_mddevs_lock);
	if (v == (void*)1)
		tmp = all_mddevs.next;
	else
		tmp = mddev->all_mddevs.next;
	if (tmp != &all_mddevs)
		next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
	else {
		next_mddev = (void*)2;
		*pos = 0x10000;
	}
	spin_unlock(&all_mddevs_lock);

	if (v != (void*)1)
		mddev_put(mddev);
	return next_mddev;

}

static void md_seq_stop(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;

	if (mddev && v != (void*)1 && v != (void*)2)
		mddev_put(mddev);
}

struct mdstat_info {
	int event;
};

static int md_seq_show(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;
	sector_t sectors;
	mdk_rdev_t *rdev;
	struct mdstat_info *mi = seq->private;
	struct bitmap *bitmap;

	if (v == (void*)1) {
		struct mdk_personality *pers;
		seq_printf(seq, "Personalities : ");
		spin_lock(&pers_lock);
		list_for_each_entry(pers, &pers_list, list)
			seq_printf(seq, "[%s] ", pers->name);

		spin_unlock(&pers_lock);
		seq_printf(seq, "\n");
		mi->event = atomic_read(&md_event_count);
		return 0;
	}
	if (v == (void*)2) {
		status_unused(seq);
		return 0;
	}

	if (mddev_lock(mddev) < 0)
		return -EINTR;

	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
		seq_printf(seq, "%s : %sactive", mdname(mddev),
			   mddev->pers ? "" : "in");
		if (mddev->pers) {
			if (mddev->ro==1)
				seq_printf(seq, " (read-only)");
			if (mddev->ro==2)
				seq_printf(seq, " (auto-read-only)");
			seq_printf(seq, " %s", mddev->pers->name);
		}

		sectors = 0;
		list_for_each_entry(rdev, &mddev->disks, same_set) {
			char b[BDEVNAME_SIZE];
			seq_printf(seq, " %s[%d]",
				   bdevname(rdev->bdev,b), rdev->desc_nr);
			if (test_bit(WriteMostly, &rdev->flags))
				seq_printf(seq, "(W)");
			if (test_bit(Faulty, &rdev->flags)) {
				seq_printf(seq, "(F)");
				continue;
			} else if (rdev->raid_disk < 0)
				seq_printf(seq, "(S)"); /* spare */
			sectors += rdev->sectors;
		}

		if (!list_empty(&mddev->disks)) {
			if (mddev->pers)
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)
					   mddev->array_sectors / 2);
			else
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)sectors / 2);
		}
		if (mddev->persistent) {
			if (mddev->major_version != 0 ||
			    mddev->minor_version != 90) {
				seq_printf(seq," super %d.%d",
					   mddev->major_version,
					   mddev->minor_version);
			}
		} else if (mddev->external)
			seq_printf(seq, " super external:%s",
				   mddev->metadata_type);
		else
			seq_printf(seq, " super non-persistent");

		if (mddev->pers) {
			mddev->pers->status(seq, mddev);
			seq_printf(seq, "\n      ");
			if (mddev->pers->sync_request) {
				if (mddev->curr_resync > 2) {
					status_resync(seq, mddev);
					seq_printf(seq, "\n      ");
				} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
					seq_printf(seq, "\tresync=DELAYED\n      ");
				else if (mddev->recovery_cp < MaxSector)
					seq_printf(seq, "\tresync=PENDING\n      ");
			}
		} else
			seq_printf(seq, "\n       ");

		if ((bitmap = mddev->bitmap)) {
			unsigned long chunk_kb;
			unsigned long flags;
			spin_lock_irqsave(&bitmap->lock, flags);
			chunk_kb = mddev->bitmap_info.chunksize >> 10;
			seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
				   "%lu%s chunk",
				   bitmap->pages - bitmap->missing_pages,
				   bitmap->pages,
				   (bitmap->pages - bitmap->missing_pages)
				   << (PAGE_SHIFT - 10),
				   chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
				   chunk_kb ? "KB" : "B");
			if (bitmap->file) {
				seq_printf(seq, ", file: ");
				seq_path(seq, &bitmap->file->f_path, " \t\n");
			}

			seq_printf(seq, "\n");
			spin_unlock_irqrestore(&bitmap->lock, flags);
		}

		seq_printf(seq, "\n");
	}
	mddev_unlock(mddev);

	return 0;
}

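/*
 * For reference, an illustrative /proc/mdstat rendering produced by
 * md_seq_show() above (example values, not captured output):
 *
 *	Personalities : [raid1]
 *	md0 : active raid1 sdb1[1] sda1[0]
 *	      1048512 blocks [2/2] [UU]
 *	      [=====>...............]  resync =27.3% (286784/1048512) finish=3.1min speed=4096K/sec
 *
 *	unused devices: <none>
 *
 * (the "[2/2] [UU]" fragment comes from the personality's own ->status
 * method, everything else from the code above.)
 */
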
"KB" : "B"); 6416 if (bitmap->file) { 6417 seq_printf(seq, ", file: "); 6418 seq_path(seq, &bitmap->file->f_path, " \t\n"); 6419 } 6420 6421 seq_printf(seq, "\n"); 6422 spin_unlock_irqrestore(&bitmap->lock, flags); 6423 } 6424 6425 seq_printf(seq, "\n"); 6426 } 6427 mddev_unlock(mddev); 6428 6429 return 0; 6430} 6431 6432static const struct seq_operations md_seq_ops = { 6433 .start = md_seq_start, 6434 .next = md_seq_next, 6435 .stop = md_seq_stop, 6436 .show = md_seq_show, 6437}; 6438 6439static int md_seq_open(struct inode *inode, struct file *file) 6440{ 6441 int error; 6442 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL); 6443 if (mi == NULL) 6444 return -ENOMEM; 6445 6446 error = seq_open(file, &md_seq_ops); 6447 if (error) 6448 kfree(mi); 6449 else { 6450 struct seq_file *p = file->private_data; 6451 p->private = mi; 6452 mi->event = atomic_read(&md_event_count); 6453 } 6454 return error; 6455} 6456 6457static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 6458{ 6459 struct seq_file *m = filp->private_data; 6460 struct mdstat_info *mi = m->private; 6461 int mask; 6462 6463 poll_wait(filp, &md_event_waiters, wait); 6464 6465 /* always allow read */ 6466 mask = POLLIN | POLLRDNORM; 6467 6468 if (mi->event != atomic_read(&md_event_count)) 6469 mask |= POLLERR | POLLPRI; 6470 return mask; 6471} 6472 6473static const struct file_operations md_seq_fops = { 6474 .owner = THIS_MODULE, 6475 .open = md_seq_open, 6476 .read = seq_read, 6477 .llseek = seq_lseek, 6478 .release = seq_release_private, 6479 .poll = mdstat_poll, 6480}; 6481 6482int register_md_personality(struct mdk_personality *p) 6483{ 6484 spin_lock(&pers_lock); 6485 list_add_tail(&p->list, &pers_list); 6486 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level); 6487 spin_unlock(&pers_lock); 6488 return 0; 6489} 6490 6491int unregister_md_personality(struct mdk_personality *p) 6492{ 6493 printk(KERN_INFO "md: %s personality unregistered\n", p->name); 6494 spin_lock(&pers_lock); 6495 list_del_init(&p->list); 6496 spin_unlock(&pers_lock); 6497 return 0; 6498} 6499 6500static int is_mddev_idle(mddev_t *mddev, int init) 6501{ 6502 mdk_rdev_t * rdev; 6503 int idle; 6504 int curr_events; 6505 6506 idle = 1; 6507 rcu_read_lock(); 6508 rdev_for_each_rcu(rdev, mddev) { 6509 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 6510 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + 6511 (int)part_stat_read(&disk->part0, sectors[1]) - 6512 atomic_read(&disk->sync_io); 6513 /* sync IO will cause sync_io to increase before the disk_stats 6514 * as sync_io is counted when a request starts, and 6515 * disk_stats is counted when it completes. 6516 * So resync activity will cause curr_events to be smaller than 6517 * when there was no such activity. 6518 * non-sync IO will cause disk_stat to increase without 6519 * increasing sync_io so curr_events will (eventually) 6520 * be larger than it was before. Once it becomes 6521 * substantially larger, the test below will cause 6522 * the array to appear non-idle, and resync will slow 6523 * down. 6524 * If there is a lot of outstanding resync activity when 6525 * we set last_event to curr_events, then all that activity 6526 * completing might cause the array to appear non-idle 6527 * and resync will be slowed down even though there might 6528 * not have been non-resync activity. This will only 6529 * happen once though. 
int register_md_personality(struct mdk_personality *p)
{
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
	spin_unlock(&pers_lock);
	return 0;
}

int unregister_md_personality(struct mdk_personality *p)
{
	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}

static int is_mddev_idle(mddev_t *mddev, int init)
{
	mdk_rdev_t * rdev;
	int idle;
	int curr_events;

	idle = 1;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
			      (int)part_stat_read(&disk->part0, sectors[1]) -
			      atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_event to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there are few or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 *
		 */
		if (init || curr_events - rdev->last_events > 64) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	rcu_read_unlock();
	return idle;
}

void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
	/* another "blocks" (512byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}


/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(mddev_t *mddev, struct bio *bi)
{
	int did_change = 0;
	if (bio_data_dir(bi) != WRITE)
		return;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		did_change = 1;
	}
	atomic_inc(&mddev->writes_pending);
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	if (mddev->in_sync) {
		spin_lock_irq(&mddev->write_lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			set_bit(MD_CHANGE_PENDING, &mddev->flags);
			md_wakeup_thread(mddev->thread);
			did_change = 1;
		}
		spin_unlock_irq(&mddev->write_lock);
	}
	if (did_change)
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	wait_event(mddev->sb_wait,
		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
}

void md_write_end(mddev_t *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else if (mddev->safemode_delay)
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
	}
}

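/*
 * Illustrative sketch (hypothetical personality, not from this file):
 * md_write_start()/md_write_end() bracket every write a personality
 * handles, so the superblock is marked 'active' before data reaches
 * the disks:
 *
 *	static int myraid_make_request(mddev_t *mddev, struct bio *bio)
 *	{
 *		md_write_start(mddev, bio);	(may block on sb update)
 *		... queue or perform the write ...
 *		md_write_end(mddev);		(on the completion path)
 *		return 0;
 *	}
 */
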
/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 *
 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
 * is dropped, so return -EAGAIN after notifying userspace.
 */
int md_allow_write(mddev_t *mddev)
{
	if (!mddev->pers)
		return 0;
	if (mddev->ro)
		return 0;
	if (!mddev->pers->sync_request)
		return 0;

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync) {
		mddev->in_sync = 0;
		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		set_bit(MD_CHANGE_PENDING, &mddev->flags);
		if (mddev->safemode_delay &&
		    mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock_irq(&mddev->write_lock);
		md_update_sb(mddev, 0);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	} else
		spin_unlock_irq(&mddev->write_lock);

	if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
		return -EAGAIN;
	else
		return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);

void md_unplug(mddev_t *mddev)
{
	if (mddev->queue)
		blk_unplug(mddev->queue);
	if (mddev->plug)
		mddev->plug->unplug_fn(mddev->plug);
}

#define SYNC_MARKS	10
#define	SYNC_MARK_STEP	(3*HZ)
void md_do_sync(mddev_t *mddev)
{
	mddev_t *mddev2;
	unsigned int currspeed = 0,
		 window;
	sector_t max_sectors,j, io_sectors;
	unsigned long mark[SYNC_MARKS];
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark,m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;
	mdk_rdev_t *rdev;
	char *desc;

	/* just in case thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;
	if (mddev->ro) /* never try to sync a read-only array */
		return;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
			desc = "data-check";
		else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			desc = "requested-resync";
		else
			desc = "resync";
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		desc = "reshape";
	else
		desc = "recovery";

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 *
	 */

	do {
		mddev->curr_resync = 2;

	try_again:
		if (kthread_should_stop())
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			goto skip;
		for_each_mddev(mddev2, tmp) {
			if (mddev2 == mddev)
				continue;
			if (!mddev->parallel_resync
			&&  mddev2->curr_resync
			&&  match_mddev_units(mddev, mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				/* We need to wait 'interruptible' so as not to
				 * contribute to the load average, and not to
				 * be caught by 'softlockup'
				 */
				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
				if (!kthread_should_stop() &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					printk(KERN_INFO "md: delaying %s of %s"
					       " until %s has finished (they"
					       " share one or more physical units)\n",
					       desc, mdname(mddev), mdname(mddev2));
					mddev_put(mddev2);
					if (signal_pending(current))
						flush_signals(current);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	j = 0;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		mddev->resync_mismatches = 0;
		/* we don't use the checkpoint if there's a bitmap */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->resync_min;
		else if (!mddev->bitmap)
			j = mddev->recovery_cp;

	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->dev_sectors;
	else {
		/* recovery follows the physical size of devices */
		max_sectors = mddev->dev_sectors;
		j = MaxSector;
		rcu_read_lock();
		list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;
		rcu_read_unlock();
	}

	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_ speed:"
	       " %d KB/sec/disk.\n", speed_min(mddev));
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for %s.\n",
	       speed_max(mddev), desc);

	is_mddev_idle(mddev, 1); /* this initializes IO event counters */

	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
	       window/2,(unsigned long long) max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	last_check = 0;

	if (j>2) {
		printk(KERN_INFO
		       "md: resuming %s of %s from checkpoint.\n",
		       desc, mdname(mddev));
		mddev->curr_resync = j;
	}
	mddev->curr_resync_completed = mddev->curr_resync;

	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;

		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
		    ((mddev->curr_resync > mddev->curr_resync_completed &&
		      (mddev->curr_resync - mddev->curr_resync_completed)
		      > (max_sectors >> 4)) ||
		     (j - mddev->curr_resync_completed)*2
		     >= mddev->resync_max - mddev->curr_resync_completed
			    )) {
			/* time to update curr_resync_completed */
			md_unplug(mddev);
			wait_event(mddev->recovery_wait,
				   atomic_read(&mddev->recovery_active) == 0);
			mddev->curr_resync_completed =
				mddev->curr_resync;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
		}

		while (j >= mddev->resync_max && !kthread_should_stop()) {
			/* As this condition is controlled by user-space,
			 * we can block indefinitely, so use '_interruptible'
			 * to avoid triggering warnings.
			 */
			flush_signals(current); /* just in case */
			wait_event_interruptible(mddev->recovery_wait,
						 mddev->resync_max > j
						 || kthread_should_stop());
		}

		if (kthread_should_stop())
			goto interrupted;

		sectors = mddev->pers->sync_request(mddev, j, &skipped,
						    currspeed < speed_min(mddev));
		if (sectors == 0) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		j += sectors;
		if (j>1) mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}


		if (kthread_should_stop())
			goto interrupted;


		/*
		 * this loop exits only when we are slower than
		 * the 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		md_unplug(mddev);
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
					!is_mddev_idle(mddev, 0)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	md_unplug(mddev);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 2) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {
					printk(KERN_INFO
					       "md: checkpointing %s of %s.\n",
					       desc, mdname(mddev));
					mddev->recovery_cp = mddev->curr_resync;
				}
			} else
				mddev->recovery_cp = MaxSector;
		} else {
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			rcu_read_lock();
			list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
				if (rdev->raid_disk >= 0 &&
				    mddev->delta_disks >= 0 &&
				    !test_bit(Faulty, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags) &&
				    rdev->recovery_offset < mddev->curr_resync)
					rdev->recovery_offset = mddev->curr_resync;
			rcu_read_unlock();
		}
	}
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

 skip:
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
		/* We completed so min/max setting can be forgotten if used. */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			mddev->resync_min = 0;
		mddev->resync_max = MaxSector;
	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		mddev->resync_min = mddev->curr_resync_completed;
	mddev->curr_resync = 0;
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
		mddev->curr_resync_completed = 0;
	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return;

 interrupted:
	/*
	 * got a signal, exit.
	 */
	printk(KERN_INFO
	       "md: md_do_sync() got signal ... exiting\n");
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	goto out;

}
EXPORT_SYMBOL_GPL(md_do_sync);

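/*
 * Illustrative sketch (hypothetical personality, not from this file):
 * the contract md_do_sync() relies on is that ->sync_request() either
 * starts IO for some sectors at 'sector_nr' and returns how many, or
 * sets *skipped and returns the number of sectors it stepped over:
 *
 *	static sector_t myraid_sync_request(mddev_t *mddev,
 *			sector_t sector_nr, int *skipped, int go_faster)
 *	{
 *		if (sector_nr >= mddev->dev_sectors) {
 *			*skipped = 1;		(nothing left to do)
 *			return 0;
 *		}
 *		... issue resync IO; completion calls md_done_sync() ...
 *		return nr_sectors_submitted;
 *	}
 */
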
static int remove_and_add_spares(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	int spares = 0;

	mddev->curr_resync_completed = 0;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    (test_bit(Faulty, &rdev->flags) ||
		     ! test_bit(In_sync, &rdev->flags)) &&
		    atomic_read(&rdev->nr_pending)==0) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev->raid_disk)==0) {
				char nm[20];
				sprintf(nm,"rd%d", rdev->raid_disk);
				sysfs_remove_link(&mddev->kobj, nm);
				rdev->raid_disk = -1;
			}
		}

	if (mddev->degraded && !mddev->ro && !mddev->recovery_disabled) {
		list_for_each_entry(rdev, &mddev->disks, same_set) {
			if (rdev->raid_disk >= 0 &&
			    !test_bit(In_sync, &rdev->flags) &&
			    !test_bit(Blocked, &rdev->flags))
				spares++;
			if (rdev->raid_disk < 0
			    && !test_bit(Faulty, &rdev->flags)) {
				rdev->recovery_offset = 0;
				if (mddev->pers->
				    hot_add_disk(mddev, rdev) == 0) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					if (sysfs_create_link(&mddev->kobj,
							      &rdev->kobj, nm))
						/* failure here is OK */;
					spares++;
					md_new_event(mddev);
					set_bit(MD_CHANGE_DEVS, &mddev->flags);
				} else
					break;
			}
		}
	}
	return spares;
}

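/*
 * Illustrative note (not from this file): personalities with a service
 * thread invoke md_check_recovery() at the top of their daemon loop,
 * e.g. raid1 does, in essence:
 *
 *	static void raid1d(mddev_t *mddev)
 *	{
 *		md_check_recovery(mddev);
 *		... handle retries and pending bios ...
 *	}
 */
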
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;


	if (mddev->bitmap)
		bitmap_daemon_work(mddev);

	if (mddev->ro)
		return;

	if (signal_pending(current)) {
		if (mddev->pers->sync_request && !mddev->external) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return;
	if ( ! (
		(mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;

		if (mddev->ro) {
			/* Only thing we do on a ro array is remove
			 * failed devices.
			 */
			remove_and_add_spares(mddev);
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}

		if (!mddev->external) {
			int did_change = 0;
			spin_lock_irq(&mddev->write_lock);
			if (mddev->safemode &&
			    !atomic_read(&mddev->writes_pending) &&
			    !mddev->in_sync &&
			    mddev->recovery_cp == MaxSector) {
				mddev->in_sync = 1;
				did_change = 1;
				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			}
			if (mddev->safemode == 1)
				mddev->safemode = 0;
			spin_unlock_irq(&mddev->write_lock);
			if (did_change)
				sysfs_notify_dirent_safe(mddev->sysfs_state);
		}

		if (mddev->flags)
			md_update_sb(mddev, 0);

		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
				/* success...*/
				/* activate any spares */
				if (mddev->pers->spare_active(mddev))
					sysfs_notify(&mddev->kobj, NULL,
						     "degraded");
			}
			if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
			    mddev->pers->finish_reshape)
				mddev->pers->finish_reshape(mddev);
			md_update_sb(mddev, 1);

			/* if array is no-longer degraded, then any saved_raid_disk
			 * information must be scrapped
			 */
			if (!mddev->degraded)
				list_for_each_entry(rdev, &mddev->disks, same_set)
					rdev->saved_raid_disk = -1;

			mddev->recovery = 0;
			/* flag recovery needed just to double check */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			sysfs_notify_dirent_safe(mddev->sysfs_action);
			md_new_event(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
		 */
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto unlock;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape == NULL ||
			    mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto unlock;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto unlock;

		if (mddev->pers->sync_request) {
			if (spares && mddev->bitmap && ! mddev->bitmap->file) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
					" thread...\n",
					mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			} else
				md_wakeup_thread(mddev->sync_thread);
			sysfs_notify_dirent_safe(mddev->sysfs_action);
			md_new_event(mddev);
		}
	unlock:
		if (!mddev->sync_thread) {
			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
					       &mddev->recovery))
				if (mddev->sysfs_action)
					sysfs_notify_dirent_safe(mddev->sysfs_action);
		}
		mddev_unlock(mddev);
	}
}

void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags),
			   msecs_to_jiffies(5000));
	rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);

static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		for_each_mddev(mddev, tmp)
			if (mddev_trylock(mddev)) {
				/* Force a switch to readonly even if the
				 * array appears to still be in use.  Hence
				 * the '100'.
				 */
				md_set_readonly(mddev, 100);
				mddev_unlock(mddev);
			}
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
	if (register_blkdev(MD_MAJOR, "md"))
		return -1;
	if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
		unregister_blkdev(MD_MAJOR, "md");
		return -1;
	}
	blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;
}

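/*
 * Illustrative note (not from this file): at boot, partition scanning
 * calls md_autodetect_dev() below for partitions of type 0xfd ("Linux
 * raid autodetect"), and autostart_arrays() then assembles them.  With
 * the printk formats used below, a two-disk boot might log, e.g.:
 *
 *	md: Autodetecting RAID arrays.
 *	md: Scanned 2 and added 2 devices.
 *
 * (the counts here are example values.)
 */
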

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}


static void autostart_arrays(int part)
{
	mdk_rdev_t *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev,0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
					i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */

static __exit void md_exit(void)
{
	mddev_t *mddev;
	struct list_head *tmp;

	blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

	unregister_blkdev(MD_MAJOR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->hold_active = 0;
	}
}

subsys_initcall(md_init);
module_exit(md_exit)

static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);

module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);

EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);