g_mirror.c revision 258357
/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/geom/mirror/g_mirror.c 258357 2013-11-19 22:55:17Z ae $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/mirror/g_mirror.h>

FEATURE(geom_mirror, "GEOM mirroring support");

static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0,
    "GEOM_MIRROR stuff");
u_int g_mirror_debug = 0;
TUNABLE_INT("kern.geom.mirror.debug", &g_mirror_debug);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RW, &g_mirror_debug, 0,
    "Debug level");
static u_int g_mirror_timeout = 4;
TUNABLE_INT("kern.geom.mirror.timeout", &g_mirror_timeout);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RW, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
static u_int g_mirror_idletime = 5;
TUNABLE_INT("kern.geom.mirror.idletime", &g_mirror_idletime);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RW,
    &g_mirror_idletime, 0, "Mark components as clean when idling");
static u_int g_mirror_disconnect_on_failure = 1;
TUNABLE_INT("kern.geom.mirror.disconnect_on_failure",
    &g_mirror_disconnect_on_failure);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RW,
    &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_mirror_syncreqs = 2;
TUNABLE_INT("kern.geom.mirror.sync_requests", &g_mirror_syncreqs);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests.");
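/*
 * The variables above are boot-time loader tunables; all but sync_requests
 * (which is CTLFLAG_RDTUN) can also be changed at runtime via sysctl(8).
 * An illustrative /boot/loader.conf fragment (values are examples only):
 *
 *	kern.geom.mirror.debug=1
 *	kern.geom.mirror.timeout=8
 *	kern.geom.mirror.sync_requests=4
 */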
%p.", __func__, (ident)); \ 80 msleep((ident), (mtx), (priority), (wmesg), (timeout)); \ 81 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \ 82} while (0) 83 84static eventhandler_tag g_mirror_post_sync = NULL; 85static int g_mirror_shutdown = 0; 86 87static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp, 88 struct g_geom *gp); 89static g_taste_t g_mirror_taste; 90static g_resize_t g_mirror_resize; 91static void g_mirror_init(struct g_class *mp); 92static void g_mirror_fini(struct g_class *mp); 93 94struct g_class g_mirror_class = { 95 .name = G_MIRROR_CLASS_NAME, 96 .version = G_VERSION, 97 .ctlreq = g_mirror_config, 98 .taste = g_mirror_taste, 99 .destroy_geom = g_mirror_destroy_geom, 100 .init = g_mirror_init, 101 .fini = g_mirror_fini, 102 .resize = g_mirror_resize 103}; 104 105 106static void g_mirror_destroy_provider(struct g_mirror_softc *sc); 107static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state); 108static void g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force); 109static void g_mirror_dumpconf(struct sbuf *sb, const char *indent, 110 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp); 111static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type); 112static void g_mirror_register_request(struct bio *bp); 113static void g_mirror_sync_release(struct g_mirror_softc *sc); 114 115 116static const char * 117g_mirror_disk_state2str(int state) 118{ 119 120 switch (state) { 121 case G_MIRROR_DISK_STATE_NONE: 122 return ("NONE"); 123 case G_MIRROR_DISK_STATE_NEW: 124 return ("NEW"); 125 case G_MIRROR_DISK_STATE_ACTIVE: 126 return ("ACTIVE"); 127 case G_MIRROR_DISK_STATE_STALE: 128 return ("STALE"); 129 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 130 return ("SYNCHRONIZING"); 131 case G_MIRROR_DISK_STATE_DISCONNECTED: 132 return ("DISCONNECTED"); 133 case G_MIRROR_DISK_STATE_DESTROY: 134 return ("DESTROY"); 135 default: 136 return ("INVALID"); 137 } 138} 139 140static const char * 141g_mirror_device_state2str(int state) 142{ 143 144 switch (state) { 145 case G_MIRROR_DEVICE_STATE_STARTING: 146 return ("STARTING"); 147 case G_MIRROR_DEVICE_STATE_RUNNING: 148 return ("RUNNING"); 149 default: 150 return ("INVALID"); 151 } 152} 153 154static const char * 155g_mirror_get_diskname(struct g_mirror_disk *disk) 156{ 157 158 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL) 159 return ("[unknown]"); 160 return (disk->d_name); 161} 162 163/* 164 * --- Events handling functions --- 165 * Events in geom_mirror are used to maintain disks and device status 166 * from one thread to simplify locking. 
167 */ 168static void 169g_mirror_event_free(struct g_mirror_event *ep) 170{ 171 172 free(ep, M_MIRROR); 173} 174 175int 176g_mirror_event_send(void *arg, int state, int flags) 177{ 178 struct g_mirror_softc *sc; 179 struct g_mirror_disk *disk; 180 struct g_mirror_event *ep; 181 int error; 182 183 ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK); 184 G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep); 185 if ((flags & G_MIRROR_EVENT_DEVICE) != 0) { 186 disk = NULL; 187 sc = arg; 188 } else { 189 disk = arg; 190 sc = disk->d_softc; 191 } 192 ep->e_disk = disk; 193 ep->e_state = state; 194 ep->e_flags = flags; 195 ep->e_error = 0; 196 mtx_lock(&sc->sc_events_mtx); 197 TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next); 198 mtx_unlock(&sc->sc_events_mtx); 199 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 200 mtx_lock(&sc->sc_queue_mtx); 201 wakeup(sc); 202 mtx_unlock(&sc->sc_queue_mtx); 203 if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0) 204 return (0); 205 sx_assert(&sc->sc_lock, SX_XLOCKED); 206 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep); 207 sx_xunlock(&sc->sc_lock); 208 while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) { 209 mtx_lock(&sc->sc_events_mtx); 210 MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event", 211 hz * 5); 212 } 213 error = ep->e_error; 214 g_mirror_event_free(ep); 215 sx_xlock(&sc->sc_lock); 216 return (error); 217} 218 219static struct g_mirror_event * 220g_mirror_event_get(struct g_mirror_softc *sc) 221{ 222 struct g_mirror_event *ep; 223 224 mtx_lock(&sc->sc_events_mtx); 225 ep = TAILQ_FIRST(&sc->sc_events); 226 mtx_unlock(&sc->sc_events_mtx); 227 return (ep); 228} 229 230static void 231g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep) 232{ 233 234 mtx_lock(&sc->sc_events_mtx); 235 TAILQ_REMOVE(&sc->sc_events, ep, e_next); 236 mtx_unlock(&sc->sc_events_mtx); 237} 238 239static void 240g_mirror_event_cancel(struct g_mirror_disk *disk) 241{ 242 struct g_mirror_softc *sc; 243 struct g_mirror_event *ep, *tmpep; 244 245 sc = disk->d_softc; 246 sx_assert(&sc->sc_lock, SX_XLOCKED); 247 248 mtx_lock(&sc->sc_events_mtx); 249 TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) { 250 if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) 251 continue; 252 if (ep->e_disk != disk) 253 continue; 254 TAILQ_REMOVE(&sc->sc_events, ep, e_next); 255 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) 256 g_mirror_event_free(ep); 257 else { 258 ep->e_error = ECANCELED; 259 wakeup(ep); 260 } 261 } 262 mtx_unlock(&sc->sc_events_mtx); 263} 264 265/* 266 * Return the number of disks in given state. 267 * If state is equal to -1, count all connected disks. 268 */ 269u_int 270g_mirror_ndisks(struct g_mirror_softc *sc, int state) 271{ 272 struct g_mirror_disk *disk; 273 u_int n = 0; 274 275 sx_assert(&sc->sc_lock, SX_LOCKED); 276 277 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 278 if (state == -1 || disk->d_state == state) 279 n++; 280 } 281 return (n); 282} 283 284/* 285 * Find a disk in mirror by its disk ID. 
286 */ 287static struct g_mirror_disk * 288g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id) 289{ 290 struct g_mirror_disk *disk; 291 292 sx_assert(&sc->sc_lock, SX_XLOCKED); 293 294 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 295 if (disk->d_id == id) 296 return (disk); 297 } 298 return (NULL); 299} 300 301static u_int 302g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp) 303{ 304 struct bio *bp; 305 u_int nreqs = 0; 306 307 mtx_lock(&sc->sc_queue_mtx); 308 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 309 if (bp->bio_from == cp) 310 nreqs++; 311 } 312 mtx_unlock(&sc->sc_queue_mtx); 313 return (nreqs); 314} 315 316static int 317g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp) 318{ 319 320 if (cp->index > 0) { 321 G_MIRROR_DEBUG(2, 322 "I/O requests for %s exist, can't destroy it now.", 323 cp->provider->name); 324 return (1); 325 } 326 if (g_mirror_nrequests(sc, cp) > 0) { 327 G_MIRROR_DEBUG(2, 328 "I/O requests for %s in queue, can't destroy it now.", 329 cp->provider->name); 330 return (1); 331 } 332 return (0); 333} 334 335static void 336g_mirror_destroy_consumer(void *arg, int flags __unused) 337{ 338 struct g_consumer *cp; 339 340 g_topology_assert(); 341 342 cp = arg; 343 G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name); 344 g_detach(cp); 345 g_destroy_consumer(cp); 346} 347 348static void 349g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp) 350{ 351 struct g_provider *pp; 352 int retaste_wait; 353 354 g_topology_assert(); 355 356 cp->private = NULL; 357 if (g_mirror_is_busy(sc, cp)) 358 return; 359 pp = cp->provider; 360 retaste_wait = 0; 361 if (cp->acw == 1) { 362 if ((pp->geom->flags & G_GEOM_WITHER) == 0) 363 retaste_wait = 1; 364 } 365 G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr, 366 -cp->acw, -cp->ace, 0); 367 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) 368 g_access(cp, -cp->acr, -cp->acw, -cp->ace); 369 if (retaste_wait) { 370 /* 371 * After retaste event was send (inside g_access()), we can send 372 * event to detach and destroy consumer. 373 * A class, which has consumer to the given provider connected 374 * will not receive retaste event for the provider. 375 * This is the way how I ignore retaste events when I close 376 * consumers opened for write: I detach and destroy consumer 377 * after retaste event is sent. 
378 */ 379 g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL); 380 return; 381 } 382 G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name); 383 g_detach(cp); 384 g_destroy_consumer(cp); 385} 386 387static int 388g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp) 389{ 390 struct g_consumer *cp; 391 int error; 392 393 g_topology_assert_not(); 394 KASSERT(disk->d_consumer == NULL, 395 ("Disk already connected (device %s).", disk->d_softc->sc_name)); 396 397 g_topology_lock(); 398 cp = g_new_consumer(disk->d_softc->sc_geom); 399 cp->flags |= G_CF_DIRECT_RECEIVE; 400 error = g_attach(cp, pp); 401 if (error != 0) { 402 g_destroy_consumer(cp); 403 g_topology_unlock(); 404 return (error); 405 } 406 error = g_access(cp, 1, 1, 1); 407 if (error != 0) { 408 g_detach(cp); 409 g_destroy_consumer(cp); 410 g_topology_unlock(); 411 G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).", 412 pp->name, error); 413 return (error); 414 } 415 g_topology_unlock(); 416 disk->d_consumer = cp; 417 disk->d_consumer->private = disk; 418 disk->d_consumer->index = 0; 419 420 G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk)); 421 return (0); 422} 423 424static void 425g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp) 426{ 427 428 g_topology_assert(); 429 430 if (cp == NULL) 431 return; 432 if (cp->provider != NULL) 433 g_mirror_kill_consumer(sc, cp); 434 else 435 g_destroy_consumer(cp); 436} 437 438/* 439 * Initialize disk. This means allocate memory, create consumer, attach it 440 * to the provider and open access (r1w1e1) to it. 441 */ 442static struct g_mirror_disk * 443g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp, 444 struct g_mirror_metadata *md, int *errorp) 445{ 446 struct g_mirror_disk *disk; 447 int i, error; 448 449 disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO); 450 if (disk == NULL) { 451 error = ENOMEM; 452 goto fail; 453 } 454 disk->d_softc = sc; 455 error = g_mirror_connect_disk(disk, pp); 456 if (error != 0) 457 goto fail; 458 disk->d_id = md->md_did; 459 disk->d_state = G_MIRROR_DISK_STATE_NONE; 460 disk->d_priority = md->md_priority; 461 disk->d_flags = md->md_dflags; 462 error = g_getattr("GEOM::candelete", disk->d_consumer, &i); 463 if (error == 0 && i != 0) 464 disk->d_flags |= G_MIRROR_DISK_FLAG_CANDELETE; 465 if (md->md_provider[0] != '\0') 466 disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED; 467 disk->d_sync.ds_consumer = NULL; 468 disk->d_sync.ds_offset = md->md_sync_offset; 469 disk->d_sync.ds_offset_done = md->md_sync_offset; 470 disk->d_genid = md->md_genid; 471 disk->d_sync.ds_syncid = md->md_syncid; 472 if (errorp != NULL) 473 *errorp = 0; 474 return (disk); 475fail: 476 if (errorp != NULL) 477 *errorp = error; 478 if (disk != NULL) 479 free(disk, M_MIRROR); 480 return (NULL); 481} 482 483static void 484g_mirror_destroy_disk(struct g_mirror_disk *disk) 485{ 486 struct g_mirror_softc *sc; 487 488 g_topology_assert_not(); 489 sc = disk->d_softc; 490 sx_assert(&sc->sc_lock, SX_XLOCKED); 491 492 LIST_REMOVE(disk, d_next); 493 g_mirror_event_cancel(disk); 494 if (sc->sc_hint == disk) 495 sc->sc_hint = NULL; 496 switch (disk->d_state) { 497 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 498 g_mirror_sync_stop(disk, 1); 499 /* FALLTHROUGH */ 500 case G_MIRROR_DISK_STATE_NEW: 501 case G_MIRROR_DISK_STATE_STALE: 502 case G_MIRROR_DISK_STATE_ACTIVE: 503 g_topology_lock(); 504 g_mirror_disconnect_consumer(sc, disk->d_consumer); 505 g_topology_unlock(); 506 free(disk, M_MIRROR); 507 break; 508 
default: 509 KASSERT(0 == 1, ("Wrong disk state (%s, %s).", 510 g_mirror_get_diskname(disk), 511 g_mirror_disk_state2str(disk->d_state))); 512 } 513} 514 515static void 516g_mirror_destroy_device(struct g_mirror_softc *sc) 517{ 518 struct g_mirror_disk *disk; 519 struct g_mirror_event *ep; 520 struct g_geom *gp; 521 struct g_consumer *cp, *tmpcp; 522 523 g_topology_assert_not(); 524 sx_assert(&sc->sc_lock, SX_XLOCKED); 525 526 gp = sc->sc_geom; 527 if (sc->sc_provider != NULL) 528 g_mirror_destroy_provider(sc); 529 for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL; 530 disk = LIST_FIRST(&sc->sc_disks)) { 531 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 532 g_mirror_update_metadata(disk); 533 g_mirror_destroy_disk(disk); 534 } 535 while ((ep = g_mirror_event_get(sc)) != NULL) { 536 g_mirror_event_remove(sc, ep); 537 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) 538 g_mirror_event_free(ep); 539 else { 540 ep->e_error = ECANCELED; 541 ep->e_flags |= G_MIRROR_EVENT_DONE; 542 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep); 543 mtx_lock(&sc->sc_events_mtx); 544 wakeup(ep); 545 mtx_unlock(&sc->sc_events_mtx); 546 } 547 } 548 callout_drain(&sc->sc_callout); 549 550 g_topology_lock(); 551 LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) { 552 g_mirror_disconnect_consumer(sc, cp); 553 } 554 g_wither_geom(sc->sc_sync.ds_geom, ENXIO); 555 G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name); 556 g_wither_geom(gp, ENXIO); 557 g_topology_unlock(); 558 mtx_destroy(&sc->sc_queue_mtx); 559 mtx_destroy(&sc->sc_events_mtx); 560 mtx_destroy(&sc->sc_done_mtx); 561 sx_xunlock(&sc->sc_lock); 562 sx_destroy(&sc->sc_lock); 563} 564 565static void 566g_mirror_orphan(struct g_consumer *cp) 567{ 568 struct g_mirror_disk *disk; 569 570 g_topology_assert(); 571 572 disk = cp->private; 573 if (disk == NULL) 574 return; 575 disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 576 g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED, 577 G_MIRROR_EVENT_DONTWAIT); 578} 579 580/* 581 * Function should return the next active disk on the list. 582 * It is possible that it will be the same disk as given. 583 * If there are no active disks on list, NULL is returned. 
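 *
 * The walk starts at LIST_NEXT(disk), wraps to LIST_FIRST() at the end of
 * sc_disks and stops at the first ACTIVE disk found or when it arrives back
 * at the given disk; in the latter case the given disk itself is returned
 * only if it is ACTIVE, otherwise NULL.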
584 */ 585static __inline struct g_mirror_disk * 586g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk) 587{ 588 struct g_mirror_disk *dp; 589 590 for (dp = LIST_NEXT(disk, d_next); dp != disk; 591 dp = LIST_NEXT(dp, d_next)) { 592 if (dp == NULL) 593 dp = LIST_FIRST(&sc->sc_disks); 594 if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE) 595 break; 596 } 597 if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE) 598 return (NULL); 599 return (dp); 600} 601 602static struct g_mirror_disk * 603g_mirror_get_disk(struct g_mirror_softc *sc) 604{ 605 struct g_mirror_disk *disk; 606 607 if (sc->sc_hint == NULL) { 608 sc->sc_hint = LIST_FIRST(&sc->sc_disks); 609 if (sc->sc_hint == NULL) 610 return (NULL); 611 } 612 disk = sc->sc_hint; 613 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) { 614 disk = g_mirror_find_next(sc, disk); 615 if (disk == NULL) 616 return (NULL); 617 } 618 sc->sc_hint = g_mirror_find_next(sc, disk); 619 return (disk); 620} 621 622static int 623g_mirror_write_metadata(struct g_mirror_disk *disk, 624 struct g_mirror_metadata *md) 625{ 626 struct g_mirror_softc *sc; 627 struct g_consumer *cp; 628 off_t offset, length; 629 u_char *sector; 630 int error = 0; 631 632 g_topology_assert_not(); 633 sc = disk->d_softc; 634 sx_assert(&sc->sc_lock, SX_LOCKED); 635 636 cp = disk->d_consumer; 637 KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name)); 638 KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name)); 639 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 640 ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr, 641 cp->acw, cp->ace)); 642 length = cp->provider->sectorsize; 643 offset = cp->provider->mediasize - length; 644 sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO); 645 if (md != NULL) { 646 /* 647 * Handle the case, when the size of parent provider reduced. 
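 *
 * The metadata is kept in the last sector of the component.  If the
 * underlying provider shrank so that this sector now lies below the
 * recorded md_mediasize, there is no room left for it and ENOSPC is
 * returned instead of overwriting mirrored data.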
648 */ 649 if (offset < md->md_mediasize) 650 error = ENOSPC; 651 else 652 mirror_metadata_encode(md, sector); 653 } 654 if (error == 0) 655 error = g_write_data(cp, offset, sector, length); 656 free(sector, M_MIRROR); 657 if (error != 0) { 658 if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) { 659 disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN; 660 G_MIRROR_DEBUG(0, "Cannot write metadata on %s " 661 "(device=%s, error=%d).", 662 g_mirror_get_diskname(disk), sc->sc_name, error); 663 } else { 664 G_MIRROR_DEBUG(1, "Cannot write metadata on %s " 665 "(device=%s, error=%d).", 666 g_mirror_get_diskname(disk), sc->sc_name, error); 667 } 668 if (g_mirror_disconnect_on_failure && 669 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) { 670 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 671 g_mirror_event_send(disk, 672 G_MIRROR_DISK_STATE_DISCONNECTED, 673 G_MIRROR_EVENT_DONTWAIT); 674 } 675 } 676 return (error); 677} 678 679static int 680g_mirror_clear_metadata(struct g_mirror_disk *disk) 681{ 682 int error; 683 684 g_topology_assert_not(); 685 sx_assert(&disk->d_softc->sc_lock, SX_LOCKED); 686 687 error = g_mirror_write_metadata(disk, NULL); 688 if (error == 0) { 689 G_MIRROR_DEBUG(2, "Metadata on %s cleared.", 690 g_mirror_get_diskname(disk)); 691 } else { 692 G_MIRROR_DEBUG(0, 693 "Cannot clear metadata on disk %s (error=%d).", 694 g_mirror_get_diskname(disk), error); 695 } 696 return (error); 697} 698 699void 700g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk, 701 struct g_mirror_metadata *md) 702{ 703 704 strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic)); 705 md->md_version = G_MIRROR_VERSION; 706 strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name)); 707 md->md_mid = sc->sc_id; 708 md->md_all = sc->sc_ndisks; 709 md->md_slice = sc->sc_slice; 710 md->md_balance = sc->sc_balance; 711 md->md_genid = sc->sc_genid; 712 md->md_mediasize = sc->sc_mediasize; 713 md->md_sectorsize = sc->sc_sectorsize; 714 md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK); 715 bzero(md->md_provider, sizeof(md->md_provider)); 716 if (disk == NULL) { 717 md->md_did = arc4random(); 718 md->md_priority = 0; 719 md->md_syncid = 0; 720 md->md_dflags = 0; 721 md->md_sync_offset = 0; 722 md->md_provsize = 0; 723 } else { 724 md->md_did = disk->d_id; 725 md->md_priority = disk->d_priority; 726 md->md_syncid = disk->d_sync.ds_syncid; 727 md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK); 728 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 729 md->md_sync_offset = disk->d_sync.ds_offset_done; 730 else 731 md->md_sync_offset = 0; 732 if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) { 733 strlcpy(md->md_provider, 734 disk->d_consumer->provider->name, 735 sizeof(md->md_provider)); 736 } 737 md->md_provsize = disk->d_consumer->provider->mediasize; 738 } 739} 740 741void 742g_mirror_update_metadata(struct g_mirror_disk *disk) 743{ 744 struct g_mirror_softc *sc; 745 struct g_mirror_metadata md; 746 int error; 747 748 g_topology_assert_not(); 749 sc = disk->d_softc; 750 sx_assert(&sc->sc_lock, SX_LOCKED); 751 752 g_mirror_fill_metadata(sc, disk, &md); 753 error = g_mirror_write_metadata(disk, &md); 754 if (error == 0) { 755 G_MIRROR_DEBUG(2, "Metadata on %s updated.", 756 g_mirror_get_diskname(disk)); 757 } else { 758 G_MIRROR_DEBUG(0, 759 "Cannot update metadata on disk %s (error=%d).", 760 g_mirror_get_diskname(disk), error); 761 } 762} 763 764static void 765g_mirror_bump_syncid(struct g_mirror_softc *sc) 766{ 767 struct g_mirror_disk *disk; 768 769 
g_topology_assert_not(); 770 sx_assert(&sc->sc_lock, SX_XLOCKED); 771 KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0, 772 ("%s called with no active disks (device=%s).", __func__, 773 sc->sc_name)); 774 775 sc->sc_syncid++; 776 G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name, 777 sc->sc_syncid); 778 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 779 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 780 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 781 disk->d_sync.ds_syncid = sc->sc_syncid; 782 g_mirror_update_metadata(disk); 783 } 784 } 785} 786 787static void 788g_mirror_bump_genid(struct g_mirror_softc *sc) 789{ 790 struct g_mirror_disk *disk; 791 792 g_topology_assert_not(); 793 sx_assert(&sc->sc_lock, SX_XLOCKED); 794 KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0, 795 ("%s called with no active disks (device=%s).", __func__, 796 sc->sc_name)); 797 798 sc->sc_genid++; 799 G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name, 800 sc->sc_genid); 801 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 802 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 803 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 804 disk->d_genid = sc->sc_genid; 805 g_mirror_update_metadata(disk); 806 } 807 } 808} 809 810static int 811g_mirror_idle(struct g_mirror_softc *sc, int acw) 812{ 813 struct g_mirror_disk *disk; 814 int timeout; 815 816 g_topology_assert_not(); 817 sx_assert(&sc->sc_lock, SX_XLOCKED); 818 819 if (sc->sc_provider == NULL) 820 return (0); 821 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0) 822 return (0); 823 if (sc->sc_idle) 824 return (0); 825 if (sc->sc_writes > 0) 826 return (0); 827 if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) { 828 timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write); 829 if (!g_mirror_shutdown && timeout > 0) 830 return (timeout); 831 } 832 sc->sc_idle = 1; 833 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 834 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 835 continue; 836 G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.", 837 g_mirror_get_diskname(disk), sc->sc_name); 838 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 839 g_mirror_update_metadata(disk); 840 } 841 return (0); 842} 843 844static void 845g_mirror_unidle(struct g_mirror_softc *sc) 846{ 847 struct g_mirror_disk *disk; 848 849 g_topology_assert_not(); 850 sx_assert(&sc->sc_lock, SX_XLOCKED); 851 852 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0) 853 return; 854 sc->sc_idle = 0; 855 sc->sc_last_write = time_uptime; 856 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 857 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 858 continue; 859 G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.", 860 g_mirror_get_diskname(disk), sc->sc_name); 861 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 862 g_mirror_update_metadata(disk); 863 } 864} 865 866static void 867g_mirror_flush_done(struct bio *bp) 868{ 869 struct g_mirror_softc *sc; 870 struct bio *pbp; 871 872 pbp = bp->bio_parent; 873 sc = pbp->bio_to->geom->softc; 874 mtx_lock(&sc->sc_done_mtx); 875 if (pbp->bio_error == 0) 876 pbp->bio_error = bp->bio_error; 877 pbp->bio_completed += bp->bio_completed; 878 pbp->bio_inbed++; 879 if (pbp->bio_children == pbp->bio_inbed) { 880 mtx_unlock(&sc->sc_done_mtx); 881 g_io_deliver(pbp, pbp->bio_error); 882 } else 883 mtx_unlock(&sc->sc_done_mtx); 884 g_destroy_bio(bp); 885} 886 887static void 888g_mirror_done(struct bio *bp) 889{ 890 struct g_mirror_softc *sc; 891 892 sc = bp->bio_from->geom->softc; 893 bp->bio_cflags 
= G_MIRROR_BIO_FLAG_REGULAR; 894 mtx_lock(&sc->sc_queue_mtx); 895 bioq_disksort(&sc->sc_queue, bp); 896 mtx_unlock(&sc->sc_queue_mtx); 897 wakeup(sc); 898} 899 900static void 901g_mirror_regular_request(struct bio *bp) 902{ 903 struct g_mirror_softc *sc; 904 struct g_mirror_disk *disk; 905 struct bio *pbp; 906 907 g_topology_assert_not(); 908 909 pbp = bp->bio_parent; 910 sc = pbp->bio_to->geom->softc; 911 bp->bio_from->index--; 912 if (bp->bio_cmd == BIO_WRITE) 913 sc->sc_writes--; 914 disk = bp->bio_from->private; 915 if (disk == NULL) { 916 g_topology_lock(); 917 g_mirror_kill_consumer(sc, bp->bio_from); 918 g_topology_unlock(); 919 } 920 921 pbp->bio_inbed++; 922 KASSERT(pbp->bio_inbed <= pbp->bio_children, 923 ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed, 924 pbp->bio_children)); 925 if (bp->bio_error == 0 && pbp->bio_error == 0) { 926 G_MIRROR_LOGREQ(3, bp, "Request delivered."); 927 g_destroy_bio(bp); 928 if (pbp->bio_children == pbp->bio_inbed) { 929 G_MIRROR_LOGREQ(3, pbp, "Request delivered."); 930 pbp->bio_completed = pbp->bio_length; 931 if (pbp->bio_cmd == BIO_WRITE || 932 pbp->bio_cmd == BIO_DELETE) { 933 bioq_remove(&sc->sc_inflight, pbp); 934 /* Release delayed sync requests if possible. */ 935 g_mirror_sync_release(sc); 936 } 937 g_io_deliver(pbp, pbp->bio_error); 938 } 939 return; 940 } else if (bp->bio_error != 0) { 941 if (pbp->bio_error == 0) 942 pbp->bio_error = bp->bio_error; 943 if (disk != NULL) { 944 if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) { 945 disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN; 946 G_MIRROR_LOGREQ(0, bp, 947 "Request failed (error=%d).", 948 bp->bio_error); 949 } else { 950 G_MIRROR_LOGREQ(1, bp, 951 "Request failed (error=%d).", 952 bp->bio_error); 953 } 954 if (g_mirror_disconnect_on_failure && 955 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) 956 { 957 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 958 g_mirror_event_send(disk, 959 G_MIRROR_DISK_STATE_DISCONNECTED, 960 G_MIRROR_EVENT_DONTWAIT); 961 } 962 } 963 switch (pbp->bio_cmd) { 964 case BIO_DELETE: 965 case BIO_WRITE: 966 pbp->bio_inbed--; 967 pbp->bio_children--; 968 break; 969 } 970 } 971 g_destroy_bio(bp); 972 973 switch (pbp->bio_cmd) { 974 case BIO_READ: 975 if (pbp->bio_inbed < pbp->bio_children) 976 break; 977 if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1) 978 g_io_deliver(pbp, pbp->bio_error); 979 else { 980 pbp->bio_error = 0; 981 mtx_lock(&sc->sc_queue_mtx); 982 bioq_disksort(&sc->sc_queue, pbp); 983 mtx_unlock(&sc->sc_queue_mtx); 984 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 985 wakeup(sc); 986 } 987 break; 988 case BIO_DELETE: 989 case BIO_WRITE: 990 if (pbp->bio_children == 0) { 991 /* 992 * All requests failed. 993 */ 994 } else if (pbp->bio_inbed < pbp->bio_children) { 995 /* Do nothing. */ 996 break; 997 } else if (pbp->bio_children == pbp->bio_inbed) { 998 /* Some requests succeeded. */ 999 pbp->bio_error = 0; 1000 pbp->bio_completed = pbp->bio_length; 1001 } 1002 bioq_remove(&sc->sc_inflight, pbp); 1003 /* Release delayed sync requests if possible. 
*/ 1004 g_mirror_sync_release(sc); 1005 g_io_deliver(pbp, pbp->bio_error); 1006 break; 1007 default: 1008 KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd)); 1009 break; 1010 } 1011} 1012 1013static void 1014g_mirror_sync_done(struct bio *bp) 1015{ 1016 struct g_mirror_softc *sc; 1017 1018 G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered."); 1019 sc = bp->bio_from->geom->softc; 1020 bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC; 1021 mtx_lock(&sc->sc_queue_mtx); 1022 bioq_disksort(&sc->sc_queue, bp); 1023 mtx_unlock(&sc->sc_queue_mtx); 1024 wakeup(sc); 1025} 1026 1027static void 1028g_mirror_kernel_dump(struct bio *bp) 1029{ 1030 struct g_mirror_softc *sc; 1031 struct g_mirror_disk *disk; 1032 struct bio *cbp; 1033 struct g_kerneldump *gkd; 1034 1035 /* 1036 * We configure dumping to the first component, because this component 1037 * will be used for reading with 'prefer' balance algorithm. 1038 * If the component with the higest priority is currently disconnected 1039 * we will not be able to read the dump after the reboot if it will be 1040 * connected and synchronized later. Can we do something better? 1041 */ 1042 sc = bp->bio_to->geom->softc; 1043 disk = LIST_FIRST(&sc->sc_disks); 1044 1045 gkd = (struct g_kerneldump *)bp->bio_data; 1046 if (gkd->length > bp->bio_to->mediasize) 1047 gkd->length = bp->bio_to->mediasize; 1048 cbp = g_clone_bio(bp); 1049 if (cbp == NULL) { 1050 g_io_deliver(bp, ENOMEM); 1051 return; 1052 } 1053 cbp->bio_done = g_std_done; 1054 g_io_request(cbp, disk->d_consumer); 1055 G_MIRROR_DEBUG(1, "Kernel dump will go to %s.", 1056 g_mirror_get_diskname(disk)); 1057} 1058 1059static void 1060g_mirror_flush(struct g_mirror_softc *sc, struct bio *bp) 1061{ 1062 struct bio_queue_head queue; 1063 struct g_mirror_disk *disk; 1064 struct g_consumer *cp; 1065 struct bio *cbp; 1066 1067 bioq_init(&queue); 1068 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1069 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1070 continue; 1071 cbp = g_clone_bio(bp); 1072 if (cbp == NULL) { 1073 while ((cbp = bioq_takefirst(&queue)) != NULL) 1074 g_destroy_bio(cbp); 1075 if (bp->bio_error == 0) 1076 bp->bio_error = ENOMEM; 1077 g_io_deliver(bp, bp->bio_error); 1078 return; 1079 } 1080 bioq_insert_tail(&queue, cbp); 1081 cbp->bio_done = g_mirror_flush_done; 1082 cbp->bio_caller1 = disk; 1083 cbp->bio_to = disk->d_consumer->provider; 1084 } 1085 while ((cbp = bioq_takefirst(&queue)) != NULL) { 1086 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1087 disk = cbp->bio_caller1; 1088 cbp->bio_caller1 = NULL; 1089 cp = disk->d_consumer; 1090 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1091 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1092 cp->acr, cp->acw, cp->ace)); 1093 g_io_request(cbp, disk->d_consumer); 1094 } 1095} 1096 1097static void 1098g_mirror_start(struct bio *bp) 1099{ 1100 struct g_mirror_softc *sc; 1101 1102 sc = bp->bio_to->geom->softc; 1103 /* 1104 * If sc == NULL or there are no valid disks, provider's error 1105 * should be set and g_mirror_start() should not be called at all. 
1106 */ 1107 KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 1108 ("Provider's error should be set (error=%d)(mirror=%s).", 1109 bp->bio_to->error, bp->bio_to->name)); 1110 G_MIRROR_LOGREQ(3, bp, "Request received."); 1111 1112 switch (bp->bio_cmd) { 1113 case BIO_READ: 1114 case BIO_WRITE: 1115 case BIO_DELETE: 1116 break; 1117 case BIO_FLUSH: 1118 g_mirror_flush(sc, bp); 1119 return; 1120 case BIO_GETATTR: 1121 if (g_handleattr_int(bp, "GEOM::candelete", 1)) 1122 return; 1123 else if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) { 1124 g_mirror_kernel_dump(bp); 1125 return; 1126 } 1127 /* FALLTHROUGH */ 1128 default: 1129 g_io_deliver(bp, EOPNOTSUPP); 1130 return; 1131 } 1132 mtx_lock(&sc->sc_queue_mtx); 1133 bioq_disksort(&sc->sc_queue, bp); 1134 mtx_unlock(&sc->sc_queue_mtx); 1135 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 1136 wakeup(sc); 1137} 1138 1139/* 1140 * Return TRUE if the given request is colliding with a in-progress 1141 * synchronization request. 1142 */ 1143static int 1144g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp) 1145{ 1146 struct g_mirror_disk *disk; 1147 struct bio *sbp; 1148 off_t rstart, rend, sstart, send; 1149 int i; 1150 1151 if (sc->sc_sync.ds_ndisks == 0) 1152 return (0); 1153 rstart = bp->bio_offset; 1154 rend = bp->bio_offset + bp->bio_length; 1155 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1156 if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING) 1157 continue; 1158 for (i = 0; i < g_mirror_syncreqs; i++) { 1159 sbp = disk->d_sync.ds_bios[i]; 1160 if (sbp == NULL) 1161 continue; 1162 sstart = sbp->bio_offset; 1163 send = sbp->bio_offset + sbp->bio_length; 1164 if (rend > sstart && rstart < send) 1165 return (1); 1166 } 1167 } 1168 return (0); 1169} 1170 1171/* 1172 * Return TRUE if the given sync request is colliding with a in-progress regular 1173 * request. 1174 */ 1175static int 1176g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp) 1177{ 1178 off_t rstart, rend, sstart, send; 1179 struct bio *bp; 1180 1181 if (sc->sc_sync.ds_ndisks == 0) 1182 return (0); 1183 sstart = sbp->bio_offset; 1184 send = sbp->bio_offset + sbp->bio_length; 1185 TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) { 1186 rstart = bp->bio_offset; 1187 rend = bp->bio_offset + bp->bio_length; 1188 if (rend > sstart && rstart < send) 1189 return (1); 1190 } 1191 return (0); 1192} 1193 1194/* 1195 * Puts request onto delayed queue. 1196 */ 1197static void 1198g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp) 1199{ 1200 1201 G_MIRROR_LOGREQ(2, bp, "Delaying request."); 1202 bioq_insert_head(&sc->sc_regular_delayed, bp); 1203} 1204 1205/* 1206 * Puts synchronization request onto delayed queue. 1207 */ 1208static void 1209g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp) 1210{ 1211 1212 G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request."); 1213 bioq_insert_tail(&sc->sc_sync_delayed, bp); 1214} 1215 1216/* 1217 * Releases delayed regular requests which don't collide anymore with sync 1218 * requests. 
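 *
 * Regular writes that overlap an in-flight synchronization request are
 * parked on sc_regular_delayed by g_mirror_regular_delay() and are moved
 * back to the head of sc_queue here once the overlapping synchronization
 * requests have completed.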
1219 */ 1220static void 1221g_mirror_regular_release(struct g_mirror_softc *sc) 1222{ 1223 struct bio *bp, *bp2; 1224 1225 TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) { 1226 if (g_mirror_sync_collision(sc, bp)) 1227 continue; 1228 bioq_remove(&sc->sc_regular_delayed, bp); 1229 G_MIRROR_LOGREQ(2, bp, "Releasing delayed request (%p).", bp); 1230 mtx_lock(&sc->sc_queue_mtx); 1231 bioq_insert_head(&sc->sc_queue, bp); 1232#if 0 1233 /* 1234 * wakeup() is not needed, because this function is called from 1235 * the worker thread. 1236 */ 1237 wakeup(&sc->sc_queue); 1238#endif 1239 mtx_unlock(&sc->sc_queue_mtx); 1240 } 1241} 1242 1243/* 1244 * Releases delayed sync requests which don't collide anymore with regular 1245 * requests. 1246 */ 1247static void 1248g_mirror_sync_release(struct g_mirror_softc *sc) 1249{ 1250 struct bio *bp, *bp2; 1251 1252 TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) { 1253 if (g_mirror_regular_collision(sc, bp)) 1254 continue; 1255 bioq_remove(&sc->sc_sync_delayed, bp); 1256 G_MIRROR_LOGREQ(2, bp, 1257 "Releasing delayed synchronization request."); 1258 g_io_request(bp, bp->bio_from); 1259 } 1260} 1261 1262/* 1263 * Handle synchronization requests. 1264 * Every synchronization request is two-steps process: first, READ request is 1265 * send to active provider and then WRITE request (with read data) to the provider 1266 * beeing synchronized. When WRITE is finished, new synchronization request is 1267 * send. 1268 */ 1269static void 1270g_mirror_sync_request(struct bio *bp) 1271{ 1272 struct g_mirror_softc *sc; 1273 struct g_mirror_disk *disk; 1274 1275 bp->bio_from->index--; 1276 sc = bp->bio_from->geom->softc; 1277 disk = bp->bio_from->private; 1278 if (disk == NULL) { 1279 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */ 1280 g_topology_lock(); 1281 g_mirror_kill_consumer(sc, bp->bio_from); 1282 g_topology_unlock(); 1283 free(bp->bio_data, M_MIRROR); 1284 g_destroy_bio(bp); 1285 sx_xlock(&sc->sc_lock); 1286 return; 1287 } 1288 1289 /* 1290 * Synchronization request. 1291 */ 1292 switch (bp->bio_cmd) { 1293 case BIO_READ: 1294 { 1295 struct g_consumer *cp; 1296 1297 if (bp->bio_error != 0) { 1298 G_MIRROR_LOGREQ(0, bp, 1299 "Synchronization request failed (error=%d).", 1300 bp->bio_error); 1301 g_destroy_bio(bp); 1302 return; 1303 } 1304 G_MIRROR_LOGREQ(3, bp, 1305 "Synchronization request half-finished."); 1306 bp->bio_cmd = BIO_WRITE; 1307 bp->bio_cflags = 0; 1308 cp = disk->d_consumer; 1309 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1310 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1311 cp->acr, cp->acw, cp->ace)); 1312 cp->index++; 1313 g_io_request(bp, cp); 1314 return; 1315 } 1316 case BIO_WRITE: 1317 { 1318 struct g_mirror_disk_sync *sync; 1319 off_t offset; 1320 void *data; 1321 int i; 1322 1323 if (bp->bio_error != 0) { 1324 G_MIRROR_LOGREQ(0, bp, 1325 "Synchronization request failed (error=%d).", 1326 bp->bio_error); 1327 g_destroy_bio(bp); 1328 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 1329 g_mirror_event_send(disk, 1330 G_MIRROR_DISK_STATE_DISCONNECTED, 1331 G_MIRROR_EVENT_DONTWAIT); 1332 return; 1333 } 1334 G_MIRROR_LOGREQ(3, bp, "Synchronization request finished."); 1335 sync = &disk->d_sync; 1336 if (sync->ds_offset >= sc->sc_mediasize || 1337 sync->ds_consumer == NULL || 1338 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1339 /* Don't send more synchronization requests. 
*/ 1340 sync->ds_inflight--; 1341 if (sync->ds_bios != NULL) { 1342 i = (int)(uintptr_t)bp->bio_caller1; 1343 sync->ds_bios[i] = NULL; 1344 } 1345 free(bp->bio_data, M_MIRROR); 1346 g_destroy_bio(bp); 1347 if (sync->ds_inflight > 0) 1348 return; 1349 if (sync->ds_consumer == NULL || 1350 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1351 return; 1352 } 1353 /* Disk up-to-date, activate it. */ 1354 g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE, 1355 G_MIRROR_EVENT_DONTWAIT); 1356 return; 1357 } 1358 1359 /* Send next synchronization request. */ 1360 data = bp->bio_data; 1361 bzero(bp, sizeof(*bp)); 1362 bp->bio_cmd = BIO_READ; 1363 bp->bio_offset = sync->ds_offset; 1364 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); 1365 sync->ds_offset += bp->bio_length; 1366 bp->bio_done = g_mirror_sync_done; 1367 bp->bio_data = data; 1368 bp->bio_from = sync->ds_consumer; 1369 bp->bio_to = sc->sc_provider; 1370 G_MIRROR_LOGREQ(3, bp, "Sending synchronization request."); 1371 sync->ds_consumer->index++; 1372 /* 1373 * Delay the request if it is colliding with a regular request. 1374 */ 1375 if (g_mirror_regular_collision(sc, bp)) 1376 g_mirror_sync_delay(sc, bp); 1377 else 1378 g_io_request(bp, sync->ds_consumer); 1379 1380 /* Release delayed requests if possible. */ 1381 g_mirror_regular_release(sc); 1382 1383 /* Find the smallest offset */ 1384 offset = sc->sc_mediasize; 1385 for (i = 0; i < g_mirror_syncreqs; i++) { 1386 bp = sync->ds_bios[i]; 1387 if (bp->bio_offset < offset) 1388 offset = bp->bio_offset; 1389 } 1390 if (sync->ds_offset_done + (MAXPHYS * 100) < offset) { 1391 /* Update offset_done on every 100 blocks. */ 1392 sync->ds_offset_done = offset; 1393 g_mirror_update_metadata(disk); 1394 } 1395 return; 1396 } 1397 default: 1398 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", 1399 bp->bio_cmd, sc->sc_name)); 1400 break; 1401 } 1402} 1403 1404static void 1405g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp) 1406{ 1407 struct g_mirror_disk *disk; 1408 struct g_consumer *cp; 1409 struct bio *cbp; 1410 1411 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1412 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE) 1413 break; 1414 } 1415 if (disk == NULL) { 1416 if (bp->bio_error == 0) 1417 bp->bio_error = ENXIO; 1418 g_io_deliver(bp, bp->bio_error); 1419 return; 1420 } 1421 cbp = g_clone_bio(bp); 1422 if (cbp == NULL) { 1423 if (bp->bio_error == 0) 1424 bp->bio_error = ENOMEM; 1425 g_io_deliver(bp, bp->bio_error); 1426 return; 1427 } 1428 /* 1429 * Fill in the component buf structure. 1430 */ 1431 cp = disk->d_consumer; 1432 cbp->bio_done = g_mirror_done; 1433 cbp->bio_to = cp->provider; 1434 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1435 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1436 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1437 cp->acw, cp->ace)); 1438 cp->index++; 1439 g_io_request(cbp, cp); 1440} 1441 1442static void 1443g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp) 1444{ 1445 struct g_mirror_disk *disk; 1446 struct g_consumer *cp; 1447 struct bio *cbp; 1448 1449 disk = g_mirror_get_disk(sc); 1450 if (disk == NULL) { 1451 if (bp->bio_error == 0) 1452 bp->bio_error = ENXIO; 1453 g_io_deliver(bp, bp->bio_error); 1454 return; 1455 } 1456 cbp = g_clone_bio(bp); 1457 if (cbp == NULL) { 1458 if (bp->bio_error == 0) 1459 bp->bio_error = ENOMEM; 1460 g_io_deliver(bp, bp->bio_error); 1461 return; 1462 } 1463 /* 1464 * Fill in the component buf structure. 
1465 */ 1466 cp = disk->d_consumer; 1467 cbp->bio_done = g_mirror_done; 1468 cbp->bio_to = cp->provider; 1469 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1470 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1471 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1472 cp->acw, cp->ace)); 1473 cp->index++; 1474 g_io_request(cbp, cp); 1475} 1476 1477#define TRACK_SIZE (1 * 1024 * 1024) 1478#define LOAD_SCALE 256 1479#define ABS(x) (((x) >= 0) ? (x) : (-(x))) 1480 1481static void 1482g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp) 1483{ 1484 struct g_mirror_disk *disk, *dp; 1485 struct g_consumer *cp; 1486 struct bio *cbp; 1487 int prio, best; 1488 1489 /* Find a disk with the smallest load. */ 1490 disk = NULL; 1491 best = INT_MAX; 1492 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 1493 if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1494 continue; 1495 prio = dp->load; 1496 /* If disk head is precisely in position - highly prefer it. */ 1497 if (dp->d_last_offset == bp->bio_offset) 1498 prio -= 2 * LOAD_SCALE; 1499 else 1500 /* If disk head is close to position - prefer it. */ 1501 if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE) 1502 prio -= 1 * LOAD_SCALE; 1503 if (prio <= best) { 1504 disk = dp; 1505 best = prio; 1506 } 1507 } 1508 KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name)); 1509 cbp = g_clone_bio(bp); 1510 if (cbp == NULL) { 1511 if (bp->bio_error == 0) 1512 bp->bio_error = ENOMEM; 1513 g_io_deliver(bp, bp->bio_error); 1514 return; 1515 } 1516 /* 1517 * Fill in the component buf structure. 1518 */ 1519 cp = disk->d_consumer; 1520 cbp->bio_done = g_mirror_done; 1521 cbp->bio_to = cp->provider; 1522 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1523 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1524 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1525 cp->acw, cp->ace)); 1526 cp->index++; 1527 /* Remember last head position */ 1528 disk->d_last_offset = bp->bio_offset + bp->bio_length; 1529 /* Update loads. */ 1530 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 1531 dp->load = (dp->d_consumer->index * LOAD_SCALE + 1532 dp->load * 7) / 8; 1533 } 1534 g_io_request(cbp, cp); 1535} 1536 1537static void 1538g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp) 1539{ 1540 struct bio_queue_head queue; 1541 struct g_mirror_disk *disk; 1542 struct g_consumer *cp; 1543 struct bio *cbp; 1544 off_t left, mod, offset, slice; 1545 u_char *data; 1546 u_int ndisks; 1547 1548 if (bp->bio_length <= sc->sc_slice) { 1549 g_mirror_request_round_robin(sc, bp); 1550 return; 1551 } 1552 ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE); 1553 slice = bp->bio_length / ndisks; 1554 mod = slice % sc->sc_provider->sectorsize; 1555 if (mod != 0) 1556 slice += sc->sc_provider->sectorsize - mod; 1557 /* 1558 * Allocate all bios before sending any request, so we can 1559 * return ENOMEM in nice and clean way. 
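 *
 * The request is split across the ACTIVE disks in slices of
 * bio_length / ndisks, rounded up to a multiple of the provider's sector
 * size; the clones are sent only after every allocation has succeeded.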
1560 */ 1561 left = bp->bio_length; 1562 offset = bp->bio_offset; 1563 data = bp->bio_data; 1564 bioq_init(&queue); 1565 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1566 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1567 continue; 1568 cbp = g_clone_bio(bp); 1569 if (cbp == NULL) { 1570 while ((cbp = bioq_takefirst(&queue)) != NULL) 1571 bioq_remove(&queue, cbp); 1572 if (bp->bio_error == 0) 1573 bp->bio_error = ENOMEM; 1574 g_io_deliver(bp, bp->bio_error); 1575 return; 1576 } 1577 bioq_insert_tail(&queue, cbp); 1578 cbp->bio_done = g_mirror_done; 1579 cbp->bio_caller1 = disk; 1580 cbp->bio_to = disk->d_consumer->provider; 1581 cbp->bio_offset = offset; 1582 cbp->bio_data = data; 1583 cbp->bio_length = MIN(left, slice); 1584 left -= cbp->bio_length; 1585 if (left == 0) 1586 break; 1587 offset += cbp->bio_length; 1588 data += cbp->bio_length; 1589 } 1590 while ((cbp = bioq_takefirst(&queue)) != NULL) { 1591 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1592 disk = cbp->bio_caller1; 1593 cbp->bio_caller1 = NULL; 1594 cp = disk->d_consumer; 1595 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1596 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1597 cp->acr, cp->acw, cp->ace)); 1598 disk->d_consumer->index++; 1599 g_io_request(cbp, disk->d_consumer); 1600 } 1601} 1602 1603static void 1604g_mirror_register_request(struct bio *bp) 1605{ 1606 struct g_mirror_softc *sc; 1607 1608 sc = bp->bio_to->geom->softc; 1609 switch (bp->bio_cmd) { 1610 case BIO_READ: 1611 switch (sc->sc_balance) { 1612 case G_MIRROR_BALANCE_LOAD: 1613 g_mirror_request_load(sc, bp); 1614 break; 1615 case G_MIRROR_BALANCE_PREFER: 1616 g_mirror_request_prefer(sc, bp); 1617 break; 1618 case G_MIRROR_BALANCE_ROUND_ROBIN: 1619 g_mirror_request_round_robin(sc, bp); 1620 break; 1621 case G_MIRROR_BALANCE_SPLIT: 1622 g_mirror_request_split(sc, bp); 1623 break; 1624 } 1625 return; 1626 case BIO_WRITE: 1627 case BIO_DELETE: 1628 { 1629 struct g_mirror_disk *disk; 1630 struct g_mirror_disk_sync *sync; 1631 struct bio_queue_head queue; 1632 struct g_consumer *cp; 1633 struct bio *cbp; 1634 1635 /* 1636 * Delay the request if it is colliding with a synchronization 1637 * request. 1638 */ 1639 if (g_mirror_sync_collision(sc, bp)) { 1640 g_mirror_regular_delay(sc, bp); 1641 return; 1642 } 1643 1644 if (sc->sc_idle) 1645 g_mirror_unidle(sc); 1646 else 1647 sc->sc_last_write = time_uptime; 1648 1649 /* 1650 * Allocate all bios before sending any request, so we can 1651 * return ENOMEM in nice and clean way. 
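 *
 * A write is cloned to every ACTIVE disk and to SYNCHRONIZING disks only
 * for offsets that have already been synchronized (bio_offset below
 * ds_offset).  BIO_DELETE clones are skipped for disks that lack the
 * CANDELETE flag.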
1652 */ 1653 bioq_init(&queue); 1654 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1655 sync = &disk->d_sync; 1656 switch (disk->d_state) { 1657 case G_MIRROR_DISK_STATE_ACTIVE: 1658 break; 1659 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 1660 if (bp->bio_offset >= sync->ds_offset) 1661 continue; 1662 break; 1663 default: 1664 continue; 1665 } 1666 if (bp->bio_cmd == BIO_DELETE && 1667 (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE) == 0) 1668 continue; 1669 cbp = g_clone_bio(bp); 1670 if (cbp == NULL) { 1671 while ((cbp = bioq_takefirst(&queue)) != NULL) 1672 g_destroy_bio(cbp); 1673 if (bp->bio_error == 0) 1674 bp->bio_error = ENOMEM; 1675 g_io_deliver(bp, bp->bio_error); 1676 return; 1677 } 1678 bioq_insert_tail(&queue, cbp); 1679 cbp->bio_done = g_mirror_done; 1680 cp = disk->d_consumer; 1681 cbp->bio_caller1 = cp; 1682 cbp->bio_to = cp->provider; 1683 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1684 ("Consumer %s not opened (r%dw%de%d).", 1685 cp->provider->name, cp->acr, cp->acw, cp->ace)); 1686 } 1687 while ((cbp = bioq_takefirst(&queue)) != NULL) { 1688 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1689 cp = cbp->bio_caller1; 1690 cbp->bio_caller1 = NULL; 1691 cp->index++; 1692 sc->sc_writes++; 1693 g_io_request(cbp, cp); 1694 } 1695 /* 1696 * Put request onto inflight queue, so we can check if new 1697 * synchronization requests don't collide with it. 1698 */ 1699 bioq_insert_tail(&sc->sc_inflight, bp); 1700 /* 1701 * Bump syncid on first write. 1702 */ 1703 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) { 1704 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID; 1705 g_mirror_bump_syncid(sc); 1706 } 1707 return; 1708 } 1709 default: 1710 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", 1711 bp->bio_cmd, sc->sc_name)); 1712 break; 1713 } 1714} 1715 1716static int 1717g_mirror_can_destroy(struct g_mirror_softc *sc) 1718{ 1719 struct g_geom *gp; 1720 struct g_consumer *cp; 1721 1722 g_topology_assert(); 1723 gp = sc->sc_geom; 1724 if (gp->softc == NULL) 1725 return (1); 1726 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_TASTING) != 0) 1727 return (0); 1728 LIST_FOREACH(cp, &gp->consumer, consumer) { 1729 if (g_mirror_is_busy(sc, cp)) 1730 return (0); 1731 } 1732 gp = sc->sc_sync.ds_geom; 1733 LIST_FOREACH(cp, &gp->consumer, consumer) { 1734 if (g_mirror_is_busy(sc, cp)) 1735 return (0); 1736 } 1737 G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.", 1738 sc->sc_name); 1739 return (1); 1740} 1741 1742static int 1743g_mirror_try_destroy(struct g_mirror_softc *sc) 1744{ 1745 1746 if (sc->sc_rootmount != NULL) { 1747 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 1748 sc->sc_rootmount); 1749 root_mount_rel(sc->sc_rootmount); 1750 sc->sc_rootmount = NULL; 1751 } 1752 g_topology_lock(); 1753 if (!g_mirror_can_destroy(sc)) { 1754 g_topology_unlock(); 1755 return (0); 1756 } 1757 sc->sc_geom->softc = NULL; 1758 sc->sc_sync.ds_geom->softc = NULL; 1759 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) { 1760 g_topology_unlock(); 1761 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, 1762 &sc->sc_worker); 1763 /* Unlock sc_lock here, as it can be destroyed after wakeup. */ 1764 sx_xunlock(&sc->sc_lock); 1765 wakeup(&sc->sc_worker); 1766 sc->sc_worker = NULL; 1767 } else { 1768 g_topology_unlock(); 1769 g_mirror_destroy_device(sc); 1770 free(sc, M_MIRROR); 1771 } 1772 return (1); 1773} 1774 1775/* 1776 * Worker thread. 
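 *
 * The loop below drains the event queue first, then tries to mark the
 * array as idle (clean), and finally services sc_queue, dispatching
 * synchronization requests and regular I/O as appropriate.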
1777 */ 1778static void 1779g_mirror_worker(void *arg) 1780{ 1781 struct g_mirror_softc *sc; 1782 struct g_mirror_event *ep; 1783 struct bio *bp; 1784 int timeout; 1785 1786 sc = arg; 1787 thread_lock(curthread); 1788 sched_prio(curthread, PRIBIO); 1789 thread_unlock(curthread); 1790 1791 sx_xlock(&sc->sc_lock); 1792 for (;;) { 1793 G_MIRROR_DEBUG(5, "%s: Let's see...", __func__); 1794 /* 1795 * First take a look at events. 1796 * This is important to handle events before any I/O requests. 1797 */ 1798 ep = g_mirror_event_get(sc); 1799 if (ep != NULL) { 1800 g_mirror_event_remove(sc, ep); 1801 if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) { 1802 /* Update only device status. */ 1803 G_MIRROR_DEBUG(3, 1804 "Running event for device %s.", 1805 sc->sc_name); 1806 ep->e_error = 0; 1807 g_mirror_update_device(sc, 1); 1808 } else { 1809 /* Update disk status. */ 1810 G_MIRROR_DEBUG(3, "Running event for disk %s.", 1811 g_mirror_get_diskname(ep->e_disk)); 1812 ep->e_error = g_mirror_update_disk(ep->e_disk, 1813 ep->e_state); 1814 if (ep->e_error == 0) 1815 g_mirror_update_device(sc, 0); 1816 } 1817 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) { 1818 KASSERT(ep->e_error == 0, 1819 ("Error cannot be handled.")); 1820 g_mirror_event_free(ep); 1821 } else { 1822 ep->e_flags |= G_MIRROR_EVENT_DONE; 1823 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, 1824 ep); 1825 mtx_lock(&sc->sc_events_mtx); 1826 wakeup(ep); 1827 mtx_unlock(&sc->sc_events_mtx); 1828 } 1829 if ((sc->sc_flags & 1830 G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1831 if (g_mirror_try_destroy(sc)) { 1832 curthread->td_pflags &= ~TDP_GEOM; 1833 G_MIRROR_DEBUG(1, "Thread exiting."); 1834 kproc_exit(0); 1835 } 1836 } 1837 G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__); 1838 continue; 1839 } 1840 /* 1841 * Check if we can mark array as CLEAN and if we can't take 1842 * how much seconds should we wait. 1843 */ 1844 timeout = g_mirror_idle(sc, -1); 1845 /* 1846 * Now I/O requests. 1847 */ 1848 /* Get first request from the queue. */ 1849 mtx_lock(&sc->sc_queue_mtx); 1850 bp = bioq_first(&sc->sc_queue); 1851 if (bp == NULL) { 1852 if ((sc->sc_flags & 1853 G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1854 mtx_unlock(&sc->sc_queue_mtx); 1855 if (g_mirror_try_destroy(sc)) { 1856 curthread->td_pflags &= ~TDP_GEOM; 1857 G_MIRROR_DEBUG(1, "Thread exiting."); 1858 kproc_exit(0); 1859 } 1860 mtx_lock(&sc->sc_queue_mtx); 1861 } 1862 sx_xunlock(&sc->sc_lock); 1863 /* 1864 * XXX: We can miss an event here, because an event 1865 * can be added without sx-device-lock and without 1866 * mtx-queue-lock. Maybe I should just stop using 1867 * dedicated mutex for events synchronization and 1868 * stick with the queue lock? 1869 * The event will hang here until next I/O request 1870 * or next event is received. 
1871 */ 1872 MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1", 1873 timeout * hz); 1874 sx_xlock(&sc->sc_lock); 1875 G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__); 1876 continue; 1877 } 1878 bioq_remove(&sc->sc_queue, bp); 1879 mtx_unlock(&sc->sc_queue_mtx); 1880 1881 if (bp->bio_from->geom == sc->sc_sync.ds_geom && 1882 (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) { 1883 g_mirror_sync_request(bp); /* READ */ 1884 } else if (bp->bio_to != sc->sc_provider) { 1885 if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0) 1886 g_mirror_regular_request(bp); 1887 else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) 1888 g_mirror_sync_request(bp); /* WRITE */ 1889 else { 1890 KASSERT(0, 1891 ("Invalid request cflags=0x%hhx to=%s.", 1892 bp->bio_cflags, bp->bio_to->name)); 1893 } 1894 } else { 1895 g_mirror_register_request(bp); 1896 } 1897 G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__); 1898 } 1899} 1900 1901static void 1902g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk) 1903{ 1904 1905 sx_assert(&sc->sc_lock, SX_LOCKED); 1906 1907 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0) 1908 return; 1909 if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) { 1910 G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.", 1911 g_mirror_get_diskname(disk), sc->sc_name); 1912 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 1913 } else if (sc->sc_idle && 1914 (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 1915 G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.", 1916 g_mirror_get_diskname(disk), sc->sc_name); 1917 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 1918 } 1919} 1920 1921static void 1922g_mirror_sync_start(struct g_mirror_disk *disk) 1923{ 1924 struct g_mirror_softc *sc; 1925 struct g_consumer *cp; 1926 struct bio *bp; 1927 int error, i; 1928 1929 g_topology_assert_not(); 1930 sc = disk->d_softc; 1931 sx_assert(&sc->sc_lock, SX_LOCKED); 1932 1933 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 1934 ("Disk %s is not marked for synchronization.", 1935 g_mirror_get_diskname(disk))); 1936 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 1937 ("Device not in RUNNING state (%s, %u).", sc->sc_name, 1938 sc->sc_state)); 1939 1940 sx_xunlock(&sc->sc_lock); 1941 g_topology_lock(); 1942 cp = g_new_consumer(sc->sc_sync.ds_geom); 1943 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 1944 error = g_attach(cp, sc->sc_provider); 1945 KASSERT(error == 0, 1946 ("Cannot attach to %s (error=%d).", sc->sc_name, error)); 1947 error = g_access(cp, 1, 0, 0); 1948 KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error)); 1949 g_topology_unlock(); 1950 sx_xlock(&sc->sc_lock); 1951 1952 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name, 1953 g_mirror_get_diskname(disk)); 1954 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0) 1955 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 1956 KASSERT(disk->d_sync.ds_consumer == NULL, 1957 ("Sync consumer already exists (device=%s, disk=%s).", 1958 sc->sc_name, g_mirror_get_diskname(disk))); 1959 1960 disk->d_sync.ds_consumer = cp; 1961 disk->d_sync.ds_consumer->private = disk; 1962 disk->d_sync.ds_consumer->index = 0; 1963 1964 /* 1965 * Allocate memory for synchronization bios and initialize them. 
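 *
 * g_mirror_syncreqs BIO_READ requests are prepared up front, each covering
 * up to MAXPHYS bytes of consecutive offsets starting at ds_offset, and
 * they are issued in parallel unless they collide with regular I/O.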
1966 */ 1967 disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs, 1968 M_MIRROR, M_WAITOK); 1969 for (i = 0; i < g_mirror_syncreqs; i++) { 1970 bp = g_alloc_bio(); 1971 disk->d_sync.ds_bios[i] = bp; 1972 bp->bio_parent = NULL; 1973 bp->bio_cmd = BIO_READ; 1974 bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK); 1975 bp->bio_cflags = 0; 1976 bp->bio_offset = disk->d_sync.ds_offset; 1977 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); 1978 disk->d_sync.ds_offset += bp->bio_length; 1979 bp->bio_done = g_mirror_sync_done; 1980 bp->bio_from = disk->d_sync.ds_consumer; 1981 bp->bio_to = sc->sc_provider; 1982 bp->bio_caller1 = (void *)(uintptr_t)i; 1983 } 1984 1985 /* Increase the number of disks in SYNCHRONIZING state. */ 1986 sc->sc_sync.ds_ndisks++; 1987 /* Set the number of in-flight synchronization requests. */ 1988 disk->d_sync.ds_inflight = g_mirror_syncreqs; 1989 1990 /* 1991 * Fire off first synchronization requests. 1992 */ 1993 for (i = 0; i < g_mirror_syncreqs; i++) { 1994 bp = disk->d_sync.ds_bios[i]; 1995 G_MIRROR_LOGREQ(3, bp, "Sending synchronization request."); 1996 disk->d_sync.ds_consumer->index++; 1997 /* 1998 * Delay the request if it is colliding with a regular request. 1999 */ 2000 if (g_mirror_regular_collision(sc, bp)) 2001 g_mirror_sync_delay(sc, bp); 2002 else 2003 g_io_request(bp, disk->d_sync.ds_consumer); 2004 } 2005} 2006 2007/* 2008 * Stop synchronization process. 2009 * type: 0 - synchronization finished 2010 * 1 - synchronization stopped 2011 */ 2012static void 2013g_mirror_sync_stop(struct g_mirror_disk *disk, int type) 2014{ 2015 struct g_mirror_softc *sc; 2016 struct g_consumer *cp; 2017 2018 g_topology_assert_not(); 2019 sc = disk->d_softc; 2020 sx_assert(&sc->sc_lock, SX_LOCKED); 2021 2022 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2023 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2024 g_mirror_disk_state2str(disk->d_state))); 2025 if (disk->d_sync.ds_consumer == NULL) 2026 return; 2027 2028 if (type == 0) { 2029 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.", 2030 sc->sc_name, g_mirror_get_diskname(disk)); 2031 } else /* if (type == 1) */ { 2032 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.", 2033 sc->sc_name, g_mirror_get_diskname(disk)); 2034 } 2035 free(disk->d_sync.ds_bios, M_MIRROR); 2036 disk->d_sync.ds_bios = NULL; 2037 cp = disk->d_sync.ds_consumer; 2038 disk->d_sync.ds_consumer = NULL; 2039 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2040 sc->sc_sync.ds_ndisks--; 2041 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. 
*/ 2042 g_topology_lock(); 2043 g_mirror_kill_consumer(sc, cp); 2044 g_topology_unlock(); 2045 sx_xlock(&sc->sc_lock); 2046} 2047 2048static void 2049g_mirror_launch_provider(struct g_mirror_softc *sc) 2050{ 2051 struct g_mirror_disk *disk; 2052 struct g_provider *pp, *dp; 2053 2054 sx_assert(&sc->sc_lock, SX_LOCKED); 2055 2056 g_topology_lock(); 2057 pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name); 2058 pp->flags |= G_PF_DIRECT_RECEIVE; 2059 pp->mediasize = sc->sc_mediasize; 2060 pp->sectorsize = sc->sc_sectorsize; 2061 pp->stripesize = 0; 2062 pp->stripeoffset = 0; 2063 2064 /* Splitting of unmapped BIO's could work but isn't implemented now */ 2065 if (sc->sc_balance != G_MIRROR_BALANCE_SPLIT) 2066 pp->flags |= G_PF_ACCEPT_UNMAPPED; 2067 2068 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2069 if (disk->d_consumer && disk->d_consumer->provider) { 2070 dp = disk->d_consumer->provider; 2071 if (dp->stripesize > pp->stripesize) { 2072 pp->stripesize = dp->stripesize; 2073 pp->stripeoffset = dp->stripeoffset; 2074 } 2075 /* A provider underneath us doesn't support unmapped */ 2076 if ((dp->flags & G_PF_ACCEPT_UNMAPPED) == 0) { 2077 G_MIRROR_DEBUG(0, "Cancelling unmapped " 2078 "because of %s.", dp->name); 2079 pp->flags &= ~G_PF_ACCEPT_UNMAPPED; 2080 } 2081 } 2082 } 2083 sc->sc_provider = pp; 2084 g_error_provider(pp, 0); 2085 g_topology_unlock(); 2086 G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name, 2087 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks); 2088 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2089 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2090 g_mirror_sync_start(disk); 2091 } 2092} 2093 2094static void 2095g_mirror_destroy_provider(struct g_mirror_softc *sc) 2096{ 2097 struct g_mirror_disk *disk; 2098 struct bio *bp; 2099 2100 g_topology_assert_not(); 2101 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).", 2102 sc->sc_name)); 2103 2104 g_topology_lock(); 2105 g_error_provider(sc->sc_provider, ENXIO); 2106 mtx_lock(&sc->sc_queue_mtx); 2107 while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) 2108 g_io_deliver(bp, ENXIO); 2109 mtx_unlock(&sc->sc_queue_mtx); 2110 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name, 2111 sc->sc_provider->name); 2112 sc->sc_provider->flags |= G_PF_WITHER; 2113 g_orphan_provider(sc->sc_provider, ENXIO); 2114 g_topology_unlock(); 2115 sc->sc_provider = NULL; 2116 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2117 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2118 g_mirror_sync_stop(disk, 1); 2119 } 2120} 2121 2122static void 2123g_mirror_go(void *arg) 2124{ 2125 struct g_mirror_softc *sc; 2126 2127 sc = arg; 2128 G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name); 2129 g_mirror_event_send(sc, 0, 2130 G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE); 2131} 2132 2133static u_int 2134g_mirror_determine_state(struct g_mirror_disk *disk) 2135{ 2136 struct g_mirror_softc *sc; 2137 u_int state; 2138 2139 sc = disk->d_softc; 2140 if (sc->sc_syncid == disk->d_sync.ds_syncid) { 2141 if ((disk->d_flags & 2142 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { 2143 /* Disk does not need synchronization. */ 2144 state = G_MIRROR_DISK_STATE_ACTIVE; 2145 } else { 2146 if ((sc->sc_flags & 2147 G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2148 (disk->d_flags & 2149 G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2150 /* 2151 * We can start synchronization from 2152 * the stored offset. 
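				 * In other words, resume an interrupted
				 * rebuild instead of starting it over
				 * from offset 0.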
2153 */ 2154 state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2155 } else { 2156 state = G_MIRROR_DISK_STATE_STALE; 2157 } 2158 } 2159 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) { 2160 /* 2161 * Reset all synchronization data for this disk, 2162 * because if it even was synchronized, it was 2163 * synchronized to disks with different syncid. 2164 */ 2165 disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2166 disk->d_sync.ds_offset = 0; 2167 disk->d_sync.ds_offset_done = 0; 2168 disk->d_sync.ds_syncid = sc->sc_syncid; 2169 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2170 (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2171 state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2172 } else { 2173 state = G_MIRROR_DISK_STATE_STALE; 2174 } 2175 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ { 2176 /* 2177 * Not good, NOT GOOD! 2178 * It means that mirror was started on stale disks 2179 * and more fresh disk just arrive. 2180 * If there were writes, mirror is broken, sorry. 2181 * I think the best choice here is don't touch 2182 * this disk and inform the user loudly. 2183 */ 2184 G_MIRROR_DEBUG(0, "Device %s was started before the freshest " 2185 "disk (%s) arrives!! It will not be connected to the " 2186 "running device.", sc->sc_name, 2187 g_mirror_get_diskname(disk)); 2188 g_mirror_destroy_disk(disk); 2189 state = G_MIRROR_DISK_STATE_NONE; 2190 /* Return immediately, because disk was destroyed. */ 2191 return (state); 2192 } 2193 G_MIRROR_DEBUG(3, "State for %s disk: %s.", 2194 g_mirror_get_diskname(disk), g_mirror_disk_state2str(state)); 2195 return (state); 2196} 2197 2198/* 2199 * Update device state. 2200 */ 2201static void 2202g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force) 2203{ 2204 struct g_mirror_disk *disk; 2205 u_int state; 2206 2207 sx_assert(&sc->sc_lock, SX_XLOCKED); 2208 2209 switch (sc->sc_state) { 2210 case G_MIRROR_DEVICE_STATE_STARTING: 2211 { 2212 struct g_mirror_disk *pdisk, *tdisk; 2213 u_int dirty, ndisks, genid, syncid; 2214 2215 KASSERT(sc->sc_provider == NULL, 2216 ("Non-NULL provider in STARTING state (%s).", sc->sc_name)); 2217 /* 2218 * Are we ready? We are, if all disks are connected or 2219 * if we have any disks and 'force' is true. 2220 */ 2221 ndisks = g_mirror_ndisks(sc, -1); 2222 if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) { 2223 ; 2224 } else if (ndisks == 0) { 2225 /* 2226 * Disks went down in starting phase, so destroy 2227 * device. 2228 */ 2229 callout_drain(&sc->sc_callout); 2230 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2231 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 2232 sc->sc_rootmount); 2233 root_mount_rel(sc->sc_rootmount); 2234 sc->sc_rootmount = NULL; 2235 return; 2236 } else { 2237 return; 2238 } 2239 2240 /* 2241 * Activate all disks with the biggest syncid. 2242 */ 2243 if (force) { 2244 /* 2245 * If 'force' is true, we have been called due to 2246 * timeout, so don't bother canceling timeout. 2247 */ 2248 ndisks = 0; 2249 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2250 if ((disk->d_flags & 2251 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { 2252 ndisks++; 2253 } 2254 } 2255 if (ndisks == 0) { 2256 /* No valid disks found, destroy device. */ 2257 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2258 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2259 __LINE__, sc->sc_rootmount); 2260 root_mount_rel(sc->sc_rootmount); 2261 sc->sc_rootmount = NULL; 2262 return; 2263 } 2264 } else { 2265 /* Cancel timeout. 
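			   All components arrived before the g_mirror_go()
			   callout fired, so the forced start is not needed.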
*/ 2266 callout_drain(&sc->sc_callout); 2267 } 2268 2269 /* 2270 * Find the biggest genid. 2271 */ 2272 genid = 0; 2273 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2274 if (disk->d_genid > genid) 2275 genid = disk->d_genid; 2276 } 2277 sc->sc_genid = genid; 2278 /* 2279 * Remove all disks without the biggest genid. 2280 */ 2281 LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) { 2282 if (disk->d_genid < genid) { 2283 G_MIRROR_DEBUG(0, 2284 "Component %s (device %s) broken, skipping.", 2285 g_mirror_get_diskname(disk), sc->sc_name); 2286 g_mirror_destroy_disk(disk); 2287 } 2288 } 2289 2290 /* 2291 * Find the biggest syncid. 2292 */ 2293 syncid = 0; 2294 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2295 if (disk->d_sync.ds_syncid > syncid) 2296 syncid = disk->d_sync.ds_syncid; 2297 } 2298 2299 /* 2300 * Here we need to look for dirty disks and if all disks 2301 * with the biggest syncid are dirty, we have to choose 2302 * one with the biggest priority and rebuild the rest. 2303 */ 2304 /* 2305 * Find the number of dirty disks with the biggest syncid. 2306 * Find the number of disks with the biggest syncid. 2307 * While here, find a disk with the biggest priority. 2308 */ 2309 dirty = ndisks = 0; 2310 pdisk = NULL; 2311 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2312 if (disk->d_sync.ds_syncid != syncid) 2313 continue; 2314 if ((disk->d_flags & 2315 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2316 continue; 2317 } 2318 ndisks++; 2319 if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 2320 dirty++; 2321 if (pdisk == NULL || 2322 pdisk->d_priority < disk->d_priority) { 2323 pdisk = disk; 2324 } 2325 } 2326 } 2327 if (dirty == 0) { 2328 /* No dirty disks at all, great. */ 2329 } else if (dirty == ndisks) { 2330 /* 2331 * Force synchronization for all dirty disks except one 2332 * with the biggest priority. 2333 */ 2334 KASSERT(pdisk != NULL, ("pdisk == NULL")); 2335 G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a " 2336 "master disk for synchronization.", 2337 g_mirror_get_diskname(pdisk), sc->sc_name); 2338 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2339 if (disk->d_sync.ds_syncid != syncid) 2340 continue; 2341 if ((disk->d_flags & 2342 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2343 continue; 2344 } 2345 KASSERT((disk->d_flags & 2346 G_MIRROR_DISK_FLAG_DIRTY) != 0, 2347 ("Disk %s isn't marked as dirty.", 2348 g_mirror_get_diskname(disk))); 2349 /* Skip the disk with the biggest priority. */ 2350 if (disk == pdisk) 2351 continue; 2352 disk->d_sync.ds_syncid = 0; 2353 } 2354 } else if (dirty < ndisks) { 2355 /* 2356 * Force synchronization for all dirty disks. 2357 * We have some non-dirty disks. 2358 */ 2359 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2360 if (disk->d_sync.ds_syncid != syncid) 2361 continue; 2362 if ((disk->d_flags & 2363 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2364 continue; 2365 } 2366 if ((disk->d_flags & 2367 G_MIRROR_DISK_FLAG_DIRTY) == 0) { 2368 continue; 2369 } 2370 disk->d_sync.ds_syncid = 0; 2371 } 2372 } 2373 2374 /* Reset hint. */ 2375 sc->sc_hint = NULL; 2376 sc->sc_syncid = syncid; 2377 if (force) { 2378 /* Remember to bump syncid on first write. 
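			   The device is being started without all of its
			   components; bumping the syncid on the first write
			   makes components that show up later detectably
			   out of date.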
*/ 2379 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2380 } 2381 state = G_MIRROR_DEVICE_STATE_RUNNING; 2382 G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.", 2383 sc->sc_name, g_mirror_device_state2str(sc->sc_state), 2384 g_mirror_device_state2str(state)); 2385 sc->sc_state = state; 2386 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2387 state = g_mirror_determine_state(disk); 2388 g_mirror_event_send(disk, state, 2389 G_MIRROR_EVENT_DONTWAIT); 2390 if (state == G_MIRROR_DISK_STATE_STALE) 2391 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2392 } 2393 break; 2394 } 2395 case G_MIRROR_DEVICE_STATE_RUNNING: 2396 if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 && 2397 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) { 2398 /* 2399 * No active disks or no disks at all, 2400 * so destroy device. 2401 */ 2402 if (sc->sc_provider != NULL) 2403 g_mirror_destroy_provider(sc); 2404 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2405 break; 2406 } else if (g_mirror_ndisks(sc, 2407 G_MIRROR_DISK_STATE_ACTIVE) > 0 && 2408 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) { 2409 /* 2410 * We have active disks, launch provider if it doesn't 2411 * exist. 2412 */ 2413 if (sc->sc_provider == NULL) 2414 g_mirror_launch_provider(sc); 2415 if (sc->sc_rootmount != NULL) { 2416 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2417 __LINE__, sc->sc_rootmount); 2418 root_mount_rel(sc->sc_rootmount); 2419 sc->sc_rootmount = NULL; 2420 } 2421 } 2422 /* 2423 * Genid should be bumped immediately, so do it here. 2424 */ 2425 if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) { 2426 sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID; 2427 g_mirror_bump_genid(sc); 2428 } 2429 break; 2430 default: 2431 KASSERT(1 == 0, ("Wrong device state (%s, %s).", 2432 sc->sc_name, g_mirror_device_state2str(sc->sc_state))); 2433 break; 2434 } 2435} 2436 2437/* 2438 * Update disk state and device state if needed. 2439 */ 2440#define DISK_STATE_CHANGED() G_MIRROR_DEBUG(1, \ 2441 "Disk %s state changed from %s to %s (device %s).", \ 2442 g_mirror_get_diskname(disk), \ 2443 g_mirror_disk_state2str(disk->d_state), \ 2444 g_mirror_disk_state2str(state), sc->sc_name) 2445static int 2446g_mirror_update_disk(struct g_mirror_disk *disk, u_int state) 2447{ 2448 struct g_mirror_softc *sc; 2449 2450 sc = disk->d_softc; 2451 sx_assert(&sc->sc_lock, SX_XLOCKED); 2452 2453again: 2454 G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.", 2455 g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state), 2456 g_mirror_disk_state2str(state)); 2457 switch (state) { 2458 case G_MIRROR_DISK_STATE_NEW: 2459 /* 2460 * Possible scenarios: 2461 * 1. New disk arrive. 2462 */ 2463 /* Previous state should be NONE. 
*/ 2464 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE, 2465 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2466 g_mirror_disk_state2str(disk->d_state))); 2467 DISK_STATE_CHANGED(); 2468 2469 disk->d_state = state; 2470 if (LIST_EMPTY(&sc->sc_disks)) 2471 LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next); 2472 else { 2473 struct g_mirror_disk *dp; 2474 2475 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 2476 if (disk->d_priority >= dp->d_priority) { 2477 LIST_INSERT_BEFORE(dp, disk, d_next); 2478 dp = NULL; 2479 break; 2480 } 2481 if (LIST_NEXT(dp, d_next) == NULL) 2482 break; 2483 } 2484 if (dp != NULL) 2485 LIST_INSERT_AFTER(dp, disk, d_next); 2486 } 2487 G_MIRROR_DEBUG(1, "Device %s: provider %s detected.", 2488 sc->sc_name, g_mirror_get_diskname(disk)); 2489 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 2490 break; 2491 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2492 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2493 g_mirror_device_state2str(sc->sc_state), 2494 g_mirror_get_diskname(disk), 2495 g_mirror_disk_state2str(disk->d_state))); 2496 state = g_mirror_determine_state(disk); 2497 if (state != G_MIRROR_DISK_STATE_NONE) 2498 goto again; 2499 break; 2500 case G_MIRROR_DISK_STATE_ACTIVE: 2501 /* 2502 * Possible scenarios: 2503 * 1. New disk does not need synchronization. 2504 * 2. Synchronization process finished successfully. 2505 */ 2506 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2507 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2508 g_mirror_device_state2str(sc->sc_state), 2509 g_mirror_get_diskname(disk), 2510 g_mirror_disk_state2str(disk->d_state))); 2511 /* Previous state should be NEW or SYNCHRONIZING. */ 2512 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW || 2513 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2514 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2515 g_mirror_disk_state2str(disk->d_state))); 2516 DISK_STATE_CHANGED(); 2517 2518 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 2519 disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2520 disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC; 2521 g_mirror_sync_stop(disk, 0); 2522 } 2523 disk->d_state = state; 2524 disk->d_sync.ds_offset = 0; 2525 disk->d_sync.ds_offset_done = 0; 2526 g_mirror_update_idle(sc, disk); 2527 g_mirror_update_metadata(disk); 2528 G_MIRROR_DEBUG(1, "Device %s: provider %s activated.", 2529 sc->sc_name, g_mirror_get_diskname(disk)); 2530 break; 2531 case G_MIRROR_DISK_STATE_STALE: 2532 /* 2533 * Possible scenarios: 2534 * 1. Stale disk was connected. 2535 */ 2536 /* Previous state should be NEW. */ 2537 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2538 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2539 g_mirror_disk_state2str(disk->d_state))); 2540 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2541 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2542 g_mirror_device_state2str(sc->sc_state), 2543 g_mirror_get_diskname(disk), 2544 g_mirror_disk_state2str(disk->d_state))); 2545 /* 2546 * STALE state is only possible if device is marked 2547 * NOAUTOSYNC. 
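		 * (With autosynchronization allowed,
		 * g_mirror_determine_state() would have picked
		 * SYNCHRONIZING instead.)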
2548 */ 2549 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0, 2550 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2551 g_mirror_device_state2str(sc->sc_state), 2552 g_mirror_get_diskname(disk), 2553 g_mirror_disk_state2str(disk->d_state))); 2554 DISK_STATE_CHANGED(); 2555 2556 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2557 disk->d_state = state; 2558 g_mirror_update_metadata(disk); 2559 G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.", 2560 sc->sc_name, g_mirror_get_diskname(disk)); 2561 break; 2562 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 2563 /* 2564 * Possible scenarios: 2565 * 1. Disk which needs synchronization was connected. 2566 */ 2567 /* Previous state should be NEW. */ 2568 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2569 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2570 g_mirror_disk_state2str(disk->d_state))); 2571 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2572 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2573 g_mirror_device_state2str(sc->sc_state), 2574 g_mirror_get_diskname(disk), 2575 g_mirror_disk_state2str(disk->d_state))); 2576 DISK_STATE_CHANGED(); 2577 2578 if (disk->d_state == G_MIRROR_DISK_STATE_NEW) 2579 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2580 disk->d_state = state; 2581 if (sc->sc_provider != NULL) { 2582 g_mirror_sync_start(disk); 2583 g_mirror_update_metadata(disk); 2584 } 2585 break; 2586 case G_MIRROR_DISK_STATE_DISCONNECTED: 2587 /* 2588 * Possible scenarios: 2589 * 1. Device wasn't running yet, but the disk disappeared. 2590 * 2. Disk was active and disappeared. 2591 * 3. Disk disappeared during the synchronization process. 2592 */ 2593 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) { 2594 /* 2595 * Previous state should be ACTIVE, STALE or 2596 * SYNCHRONIZING. 2597 */ 2598 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 2599 disk->d_state == G_MIRROR_DISK_STATE_STALE || 2600 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2601 ("Wrong disk state (%s, %s).", 2602 g_mirror_get_diskname(disk), 2603 g_mirror_disk_state2str(disk->d_state))); 2604 } else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) { 2605 /* Previous state should be NEW. */ 2606 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2607 ("Wrong disk state (%s, %s).", 2608 g_mirror_get_diskname(disk), 2609 g_mirror_disk_state2str(disk->d_state))); 2610 /* 2611 * Reset bumping syncid if disk disappeared in STARTING 2612 * state.
2613 */ 2614 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) 2615 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID; 2616#ifdef INVARIANTS 2617 } else { 2618 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).", 2619 sc->sc_name, 2620 g_mirror_device_state2str(sc->sc_state), 2621 g_mirror_get_diskname(disk), 2622 g_mirror_disk_state2str(disk->d_state))); 2623#endif 2624 } 2625 DISK_STATE_CHANGED(); 2626 G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.", 2627 sc->sc_name, g_mirror_get_diskname(disk)); 2628 2629 g_mirror_destroy_disk(disk); 2630 break; 2631 case G_MIRROR_DISK_STATE_DESTROY: 2632 { 2633 int error; 2634 2635 error = g_mirror_clear_metadata(disk); 2636 if (error != 0) 2637 return (error); 2638 DISK_STATE_CHANGED(); 2639 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", 2640 sc->sc_name, g_mirror_get_diskname(disk)); 2641 2642 g_mirror_destroy_disk(disk); 2643 sc->sc_ndisks--; 2644 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2645 g_mirror_update_metadata(disk); 2646 } 2647 break; 2648 } 2649 default: 2650 KASSERT(1 == 0, ("Unknown state (%u).", state)); 2651 break; 2652 } 2653 return (0); 2654} 2655#undef DISK_STATE_CHANGED 2656 2657int 2658g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md) 2659{ 2660 struct g_provider *pp; 2661 u_char *buf; 2662 int error; 2663 2664 g_topology_assert(); 2665 2666 error = g_access(cp, 1, 0, 0); 2667 if (error != 0) 2668 return (error); 2669 pp = cp->provider; 2670 g_topology_unlock(); 2671 /* Metadata are stored on last sector. */ 2672 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 2673 &error); 2674 g_topology_lock(); 2675 g_access(cp, -1, 0, 0); 2676 if (buf == NULL) { 2677 G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).", 2678 cp->provider->name, error); 2679 return (error); 2680 } 2681 2682 /* Decode metadata. 
*/ 2683 error = mirror_metadata_decode(buf, md); 2684 g_free(buf); 2685 if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0) 2686 return (EINVAL); 2687 if (md->md_version > G_MIRROR_VERSION) { 2688 G_MIRROR_DEBUG(0, 2689 "Kernel module is too old to handle metadata from %s.", 2690 cp->provider->name); 2691 return (EINVAL); 2692 } 2693 if (error != 0) { 2694 G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 2695 cp->provider->name); 2696 return (error); 2697 } 2698 2699 return (0); 2700} 2701 2702static int 2703g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp, 2704 struct g_mirror_metadata *md) 2705{ 2706 2707 if (g_mirror_id2disk(sc, md->md_did) != NULL) { 2708 G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.", 2709 pp->name, md->md_did); 2710 return (EEXIST); 2711 } 2712 if (md->md_all != sc->sc_ndisks) { 2713 G_MIRROR_DEBUG(1, 2714 "Invalid '%s' field on disk %s (device %s), skipping.", 2715 "md_all", pp->name, sc->sc_name); 2716 return (EINVAL); 2717 } 2718 if (md->md_slice != sc->sc_slice) { 2719 G_MIRROR_DEBUG(1, 2720 "Invalid '%s' field on disk %s (device %s), skipping.", 2721 "md_slice", pp->name, sc->sc_name); 2722 return (EINVAL); 2723 } 2724 if (md->md_balance != sc->sc_balance) { 2725 G_MIRROR_DEBUG(1, 2726 "Invalid '%s' field on disk %s (device %s), skipping.", 2727 "md_balance", pp->name, sc->sc_name); 2728 return (EINVAL); 2729 } 2730#if 0 2731 if (md->md_mediasize != sc->sc_mediasize) { 2732 G_MIRROR_DEBUG(1, 2733 "Invalid '%s' field on disk %s (device %s), skipping.", 2734 "md_mediasize", pp->name, sc->sc_name); 2735 return (EINVAL); 2736 } 2737#endif 2738 if (sc->sc_mediasize > pp->mediasize) { 2739 G_MIRROR_DEBUG(1, 2740 "Invalid size of disk %s (device %s), skipping.", pp->name, 2741 sc->sc_name); 2742 return (EINVAL); 2743 } 2744 if (md->md_sectorsize != sc->sc_sectorsize) { 2745 G_MIRROR_DEBUG(1, 2746 "Invalid '%s' field on disk %s (device %s), skipping.", 2747 "md_sectorsize", pp->name, sc->sc_name); 2748 return (EINVAL); 2749 } 2750 if ((sc->sc_sectorsize % pp->sectorsize) != 0) { 2751 G_MIRROR_DEBUG(1, 2752 "Invalid sector size of disk %s (device %s), skipping.", 2753 pp->name, sc->sc_name); 2754 return (EINVAL); 2755 } 2756 if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) { 2757 G_MIRROR_DEBUG(1, 2758 "Invalid device flags on disk %s (device %s), skipping.", 2759 pp->name, sc->sc_name); 2760 return (EINVAL); 2761 } 2762 if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) { 2763 G_MIRROR_DEBUG(1, 2764 "Invalid disk flags on disk %s (device %s), skipping.", 2765 pp->name, sc->sc_name); 2766 return (EINVAL); 2767 } 2768 return (0); 2769} 2770 2771int 2772g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp, 2773 struct g_mirror_metadata *md) 2774{ 2775 struct g_mirror_disk *disk; 2776 int error; 2777 2778 g_topology_assert_not(); 2779 G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name); 2780 2781 error = g_mirror_check_metadata(sc, pp, md); 2782 if (error != 0) 2783 return (error); 2784 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING && 2785 md->md_genid < sc->sc_genid) { 2786 G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.", 2787 pp->name, sc->sc_name); 2788 return (EINVAL); 2789 } 2790 disk = g_mirror_init_disk(sc, pp, md, &error); 2791 if (disk == NULL) 2792 return (error); 2793 error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW, 2794 G_MIRROR_EVENT_WAIT); 2795 if (error != 0) 2796 return (error); 2797 if (md->md_version < G_MIRROR_VERSION) { 2798 G_MIRROR_DEBUG(0, "Upgrading 
metadata on %s (v%d->v%d).", 2799 pp->name, md->md_version, G_MIRROR_VERSION); 2800 g_mirror_update_metadata(disk); 2801 } 2802 return (0); 2803} 2804 2805static void 2806g_mirror_destroy_delayed(void *arg, int flag) 2807{ 2808 struct g_mirror_softc *sc; 2809 int error; 2810 2811 if (flag == EV_CANCEL) { 2812 G_MIRROR_DEBUG(1, "Destroying canceled."); 2813 return; 2814 } 2815 sc = arg; 2816 g_topology_unlock(); 2817 sx_xlock(&sc->sc_lock); 2818 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0, 2819 ("DESTROY flag set on %s.", sc->sc_name)); 2820 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0, 2821 ("DESTROYING flag not set on %s.", sc->sc_name)); 2822 G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name); 2823 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT); 2824 if (error != 0) { 2825 G_MIRROR_DEBUG(0, "Cannot destroy %s.", sc->sc_name); 2826 sx_xunlock(&sc->sc_lock); 2827 } 2828 g_topology_lock(); 2829} 2830 2831static int 2832g_mirror_access(struct g_provider *pp, int acr, int acw, int ace) 2833{ 2834 struct g_mirror_softc *sc; 2835 int dcr, dcw, dce, error = 0; 2836 2837 g_topology_assert(); 2838 G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 2839 acw, ace); 2840 2841 sc = pp->geom->softc; 2842 if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0) 2843 return (0); 2844 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 2845 2846 dcr = pp->acr + acr; 2847 dcw = pp->acw + acw; 2848 dce = pp->ace + ace; 2849 2850 g_topology_unlock(); 2851 sx_xlock(&sc->sc_lock); 2852 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 || 2853 LIST_EMPTY(&sc->sc_disks)) { 2854 if (acr > 0 || acw > 0 || ace > 0) 2855 error = ENXIO; 2856 goto end; 2857 } 2858 if (dcw == 0) 2859 g_mirror_idle(sc, dcw); 2860 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0) { 2861 if (acr > 0 || acw > 0 || ace > 0) { 2862 error = ENXIO; 2863 goto end; 2864 } 2865 if (dcr == 0 && dcw == 0 && dce == 0) { 2866 g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK, 2867 sc, NULL); 2868 } 2869 } 2870end: 2871 sx_xunlock(&sc->sc_lock); 2872 g_topology_lock(); 2873 return (error); 2874} 2875 2876static struct g_geom * 2877g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md) 2878{ 2879 struct g_mirror_softc *sc; 2880 struct g_geom *gp; 2881 int error, timeout; 2882 2883 g_topology_assert(); 2884 G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name, 2885 md->md_mid); 2886 2887 /* One disk is minimum. */ 2888 if (md->md_all < 1) 2889 return (NULL); 2890 /* 2891 * Action geom. 
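 * This geom owns the mirror provider and handles regular I/O; a separate
 * geom named "%s.sync" is created below for the synchronization consumers.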
2892 */ 2893 gp = g_new_geomf(mp, "%s", md->md_name); 2894 sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO); 2895 gp->start = g_mirror_start; 2896 gp->orphan = g_mirror_orphan; 2897 gp->access = g_mirror_access; 2898 gp->dumpconf = g_mirror_dumpconf; 2899 2900 sc->sc_id = md->md_mid; 2901 sc->sc_slice = md->md_slice; 2902 sc->sc_balance = md->md_balance; 2903 sc->sc_mediasize = md->md_mediasize; 2904 sc->sc_sectorsize = md->md_sectorsize; 2905 sc->sc_ndisks = md->md_all; 2906 sc->sc_flags = md->md_mflags; 2907 sc->sc_bump_id = 0; 2908 sc->sc_idle = 1; 2909 sc->sc_last_write = time_uptime; 2910 sc->sc_writes = 0; 2911 sx_init(&sc->sc_lock, "gmirror:lock"); 2912 bioq_init(&sc->sc_queue); 2913 mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF); 2914 bioq_init(&sc->sc_regular_delayed); 2915 bioq_init(&sc->sc_inflight); 2916 bioq_init(&sc->sc_sync_delayed); 2917 LIST_INIT(&sc->sc_disks); 2918 TAILQ_INIT(&sc->sc_events); 2919 mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF); 2920 callout_init(&sc->sc_callout, CALLOUT_MPSAFE); 2921 mtx_init(&sc->sc_done_mtx, "gmirror:done", NULL, MTX_DEF); 2922 sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING; 2923 gp->softc = sc; 2924 sc->sc_geom = gp; 2925 sc->sc_provider = NULL; 2926 /* 2927 * Synchronization geom. 2928 */ 2929 gp = g_new_geomf(mp, "%s.sync", md->md_name); 2930 gp->softc = sc; 2931 gp->orphan = g_mirror_orphan; 2932 sc->sc_sync.ds_geom = gp; 2933 sc->sc_sync.ds_ndisks = 0; 2934 error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0, 2935 "g_mirror %s", md->md_name); 2936 if (error != 0) { 2937 G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.", 2938 sc->sc_name); 2939 g_destroy_geom(sc->sc_sync.ds_geom); 2940 mtx_destroy(&sc->sc_done_mtx); 2941 mtx_destroy(&sc->sc_events_mtx); 2942 mtx_destroy(&sc->sc_queue_mtx); 2943 sx_destroy(&sc->sc_lock); 2944 g_destroy_geom(sc->sc_geom); 2945 free(sc, M_MIRROR); 2946 return (NULL); 2947 } 2948 2949 G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).", 2950 sc->sc_name, sc->sc_ndisks, sc->sc_id); 2951 2952 sc->sc_rootmount = root_mount_hold("GMIRROR"); 2953 G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 2954 /* 2955 * Run timeout. 
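 * If not all components show up within g_mirror_timeout seconds,
 * g_mirror_go() forces the device to start with whatever disks are present.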
2956 */ 2957 timeout = g_mirror_timeout * hz; 2958 callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc); 2959 return (sc->sc_geom); 2960} 2961 2962int 2963g_mirror_destroy(struct g_mirror_softc *sc, int how) 2964{ 2965 struct g_mirror_disk *disk; 2966 struct g_provider *pp; 2967 2968 g_topology_assert_not(); 2969 if (sc == NULL) 2970 return (ENXIO); 2971 sx_assert(&sc->sc_lock, SX_XLOCKED); 2972 2973 pp = sc->sc_provider; 2974 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { 2975 switch (how) { 2976 case G_MIRROR_DESTROY_SOFT: 2977 G_MIRROR_DEBUG(1, 2978 "Device %s is still open (r%dw%de%d).", pp->name, 2979 pp->acr, pp->acw, pp->ace); 2980 return (EBUSY); 2981 case G_MIRROR_DESTROY_DELAYED: 2982 G_MIRROR_DEBUG(1, 2983 "Device %s will be destroyed on last close.", 2984 pp->name); 2985 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2986 if (disk->d_state == 2987 G_MIRROR_DISK_STATE_SYNCHRONIZING) { 2988 g_mirror_sync_stop(disk, 1); 2989 } 2990 } 2991 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROYING; 2992 return (EBUSY); 2993 case G_MIRROR_DESTROY_HARD: 2994 G_MIRROR_DEBUG(1, "Device %s is still open, so it " 2995 "can't be definitely removed.", pp->name); 2996 } 2997 } 2998 2999 g_topology_lock(); 3000 if (sc->sc_geom->softc == NULL) { 3001 g_topology_unlock(); 3002 return (0); 3003 } 3004 sc->sc_geom->softc = NULL; 3005 sc->sc_sync.ds_geom->softc = NULL; 3006 g_topology_unlock(); 3007 3008 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 3009 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT; 3010 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 3011 sx_xunlock(&sc->sc_lock); 3012 mtx_lock(&sc->sc_queue_mtx); 3013 wakeup(sc); 3014 mtx_unlock(&sc->sc_queue_mtx); 3015 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker); 3016 while (sc->sc_worker != NULL) 3017 tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5); 3018 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker); 3019 sx_xlock(&sc->sc_lock); 3020 g_mirror_destroy_device(sc); 3021 free(sc, M_MIRROR); 3022 return (0); 3023} 3024 3025static void 3026g_mirror_taste_orphan(struct g_consumer *cp) 3027{ 3028 3029 KASSERT(1 == 0, ("%s called while tasting %s.", __func__, 3030 cp->provider->name)); 3031} 3032 3033static struct g_geom * 3034g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 3035{ 3036 struct g_mirror_metadata md; 3037 struct g_mirror_softc *sc; 3038 struct g_consumer *cp; 3039 struct g_geom *gp; 3040 int error; 3041 3042 g_topology_assert(); 3043 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 3044 G_MIRROR_DEBUG(2, "Tasting %s.", pp->name); 3045 3046 gp = g_new_geomf(mp, "mirror:taste"); 3047 /* 3048 * This orphan function should be never called. 3049 */ 3050 gp->orphan = g_mirror_taste_orphan; 3051 cp = g_new_consumer(gp); 3052 g_attach(cp, pp); 3053 error = g_mirror_read_metadata(cp, &md); 3054 g_detach(cp); 3055 g_destroy_consumer(cp); 3056 g_destroy_geom(gp); 3057 if (error != 0) 3058 return (NULL); 3059 gp = NULL; 3060 3061 if (md.md_provider[0] != '\0' && 3062 !g_compare_names(md.md_provider, pp->name)) 3063 return (NULL); 3064 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize) 3065 return (NULL); 3066 if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) { 3067 G_MIRROR_DEBUG(0, 3068 "Device %s: provider %s marked as inactive, skipping.", 3069 md.md_name, pp->name); 3070 return (NULL); 3071 } 3072 if (g_mirror_debug >= 2) 3073 mirror_metadata_dump(&md); 3074 3075 /* 3076 * Let's check if device already exists. 
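 * The device name and mirror id from the tasted metadata are matched
 * against existing devices; a provider with the same name but a different
 * id is rejected instead of being mixed into the running device.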
3077 */ 3078 sc = NULL; 3079 LIST_FOREACH(gp, &mp->geom, geom) { 3080 sc = gp->softc; 3081 if (sc == NULL) 3082 continue; 3083 if (sc->sc_sync.ds_geom == gp) 3084 continue; 3085 if (strcmp(md.md_name, sc->sc_name) != 0) 3086 continue; 3087 if (md.md_mid != sc->sc_id) { 3088 G_MIRROR_DEBUG(0, "Device %s already configured.", 3089 sc->sc_name); 3090 return (NULL); 3091 } 3092 break; 3093 } 3094 if (gp == NULL) { 3095 gp = g_mirror_create(mp, &md); 3096 if (gp == NULL) { 3097 G_MIRROR_DEBUG(0, "Cannot create device %s.", 3098 md.md_name); 3099 return (NULL); 3100 } 3101 sc = gp->softc; 3102 } 3103 G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 3104 g_topology_unlock(); 3105 sx_xlock(&sc->sc_lock); 3106 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_TASTING; 3107 error = g_mirror_add_disk(sc, pp, &md); 3108 if (error != 0) { 3109 G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 3110 pp->name, gp->name, error); 3111 if (LIST_EMPTY(&sc->sc_disks)) { 3112 g_cancel_event(sc); 3113 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3114 g_topology_lock(); 3115 return (NULL); 3116 } 3117 gp = NULL; 3118 } 3119 sc->sc_flags &= ~G_MIRROR_DEVICE_FLAG_TASTING; 3120 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 3121 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3122 g_topology_lock(); 3123 return (NULL); 3124 } 3125 sx_xunlock(&sc->sc_lock); 3126 g_topology_lock(); 3127 return (gp); 3128} 3129 3130static void 3131g_mirror_resize(struct g_consumer *cp) 3132{ 3133 struct g_mirror_disk *disk; 3134 3135 g_topology_assert(); 3136 g_trace(G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name); 3137 3138 disk = cp->private; 3139 if (disk == NULL) 3140 return; 3141 g_topology_unlock(); 3142 g_mirror_update_metadata(disk); 3143 g_topology_lock(); 3144} 3145 3146static int 3147g_mirror_destroy_geom(struct gctl_req *req __unused, 3148 struct g_class *mp __unused, struct g_geom *gp) 3149{ 3150 struct g_mirror_softc *sc; 3151 int error; 3152 3153 g_topology_unlock(); 3154 sc = gp->softc; 3155 sx_xlock(&sc->sc_lock); 3156 g_cancel_event(sc); 3157 error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT); 3158 if (error != 0) 3159 sx_xunlock(&sc->sc_lock); 3160 g_topology_lock(); 3161 return (error); 3162} 3163 3164static void 3165g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 3166 struct g_consumer *cp, struct g_provider *pp) 3167{ 3168 struct g_mirror_softc *sc; 3169 3170 g_topology_assert(); 3171 3172 sc = gp->softc; 3173 if (sc == NULL) 3174 return; 3175 /* Skip synchronization geom. */ 3176 if (gp == sc->sc_sync.ds_geom) 3177 return; 3178 if (pp != NULL) { 3179 /* Nothing here. 
*/ 3180 } else if (cp != NULL) { 3181 struct g_mirror_disk *disk; 3182 3183 disk = cp->private; 3184 if (disk == NULL) 3185 return; 3186 g_topology_unlock(); 3187 sx_xlock(&sc->sc_lock); 3188 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id); 3189 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 3190 sbuf_printf(sb, "%s<Synchronized>", indent); 3191 if (disk->d_sync.ds_offset == 0) 3192 sbuf_printf(sb, "0%%"); 3193 else { 3194 sbuf_printf(sb, "%u%%", 3195 (u_int)((disk->d_sync.ds_offset * 100) / 3196 sc->sc_provider->mediasize)); 3197 } 3198 sbuf_printf(sb, "</Synchronized>\n"); 3199 if (disk->d_sync.ds_offset > 0) { 3200 sbuf_printf(sb, "%s<BytesSynced>%jd" 3201 "</BytesSynced>\n", indent, 3202 (intmax_t)disk->d_sync.ds_offset); 3203 } 3204 } 3205 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, 3206 disk->d_sync.ds_syncid); 3207 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, 3208 disk->d_genid); 3209 sbuf_printf(sb, "%s<Flags>", indent); 3210 if (disk->d_flags == 0) 3211 sbuf_printf(sb, "NONE"); 3212 else { 3213 int first = 1; 3214 3215#define ADD_FLAG(flag, name) do { \ 3216 if ((disk->d_flags & (flag)) != 0) { \ 3217 if (!first) \ 3218 sbuf_printf(sb, ", "); \ 3219 else \ 3220 first = 0; \ 3221 sbuf_printf(sb, name); \ 3222 } \ 3223} while (0) 3224 ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY"); 3225 ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED"); 3226 ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE"); 3227 ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING, 3228 "SYNCHRONIZING"); 3229 ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC"); 3230 ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN"); 3231#undef ADD_FLAG 3232 } 3233 sbuf_printf(sb, "</Flags>\n"); 3234 sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent, 3235 disk->d_priority); 3236 sbuf_printf(sb, "%s<State>%s</State>\n", indent, 3237 g_mirror_disk_state2str(disk->d_state)); 3238 sx_xunlock(&sc->sc_lock); 3239 g_topology_lock(); 3240 } else { 3241 g_topology_unlock(); 3242 sx_xlock(&sc->sc_lock); 3243 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 3244 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid); 3245 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid); 3246 sbuf_printf(sb, "%s<Flags>", indent); 3247 if (sc->sc_flags == 0) 3248 sbuf_printf(sb, "NONE"); 3249 else { 3250 int first = 1; 3251 3252#define ADD_FLAG(flag, name) do { \ 3253 if ((sc->sc_flags & (flag)) != 0) { \ 3254 if (!first) \ 3255 sbuf_printf(sb, ", "); \ 3256 else \ 3257 first = 0; \ 3258 sbuf_printf(sb, name); \ 3259 } \ 3260} while (0) 3261 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC"); 3262 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC"); 3263#undef ADD_FLAG 3264 } 3265 sbuf_printf(sb, "</Flags>\n"); 3266 sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent, 3267 (u_int)sc->sc_slice); 3268 sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent, 3269 balance_name(sc->sc_balance)); 3270 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 3271 sc->sc_ndisks); 3272 sbuf_printf(sb, "%s<State>", indent); 3273 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 3274 sbuf_printf(sb, "%s", "STARTING"); 3275 else if (sc->sc_ndisks == 3276 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE)) 3277 sbuf_printf(sb, "%s", "COMPLETE"); 3278 else 3279 sbuf_printf(sb, "%s", "DEGRADED"); 3280 sbuf_printf(sb, "</State>\n"); 3281 sx_xunlock(&sc->sc_lock); 3282 g_topology_lock(); 3283 } 3284} 3285 3286static void 3287g_mirror_shutdown_post_sync(void *arg, int howto) 3288{ 3289 struct g_class *mp; 3290 
struct g_geom *gp, *gp2; 3291 struct g_mirror_softc *sc; 3292 int error; 3293 3294 mp = arg; 3295 DROP_GIANT(); 3296 g_topology_lock(); 3297 g_mirror_shutdown = 1; 3298 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 3299 if ((sc = gp->softc) == NULL) 3300 continue; 3301 /* Skip synchronization geom. */ 3302 if (gp == sc->sc_sync.ds_geom) 3303 continue; 3304 g_topology_unlock(); 3305 sx_xlock(&sc->sc_lock); 3306 g_mirror_idle(sc, -1); 3307 g_cancel_event(sc); 3308 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED); 3309 if (error != 0) 3310 sx_xunlock(&sc->sc_lock); 3311 g_topology_lock(); 3312 } 3313 g_topology_unlock(); 3314 PICKUP_GIANT(); 3315} 3316 3317static void 3318g_mirror_init(struct g_class *mp) 3319{ 3320 3321 g_mirror_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync, 3322 g_mirror_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST); 3323 if (g_mirror_post_sync == NULL) 3324 G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event."); 3325} 3326 3327static void 3328g_mirror_fini(struct g_class *mp) 3329{ 3330 3331 if (g_mirror_post_sync != NULL) 3332 EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_mirror_post_sync); 3333} 3334 3335DECLARE_GEOM_CLASS(g_mirror_class, g_mirror); 3336
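/*
 * DECLARE_GEOM_CLASS() hooks g_mirror_class into GEOM's module event
 * handling, so the class is registered when the geom_mirror module is
 * loaded and unregistered when it is unloaded.
 */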