g_mirror.c revision 235599
/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/geom/mirror/g_mirror.c 235599 2012-05-18 09:19:07Z ae $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/mirror/g_mirror.h>

FEATURE(geom_mirror, "GEOM mirroring support");

static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0,
    "GEOM_MIRROR stuff");
u_int g_mirror_debug = 0;
TUNABLE_INT("kern.geom.mirror.debug", &g_mirror_debug);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RW, &g_mirror_debug, 0,
    "Debug level");
static u_int g_mirror_timeout = 4;
TUNABLE_INT("kern.geom.mirror.timeout", &g_mirror_timeout);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RW, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
static u_int g_mirror_idletime = 5;
TUNABLE_INT("kern.geom.mirror.idletime", &g_mirror_idletime);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RW,
    &g_mirror_idletime, 0, "Mark components as clean when idling");
static u_int g_mirror_disconnect_on_failure = 1;
TUNABLE_INT("kern.geom.mirror.disconnect_on_failure",
    &g_mirror_disconnect_on_failure);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RW,
    &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_mirror_syncreqs = 2;
TUNABLE_INT("kern.geom.mirror.sync_requests", &g_mirror_syncreqs);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests.");

#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)
%p.", __func__, (ident)); \ 80 msleep((ident), (mtx), (priority), (wmesg), (timeout)); \ 81 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \ 82} while (0) 83 84static eventhandler_tag g_mirror_pre_sync = NULL; 85 86static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp, 87 struct g_geom *gp); 88static g_taste_t g_mirror_taste; 89static void g_mirror_init(struct g_class *mp); 90static void g_mirror_fini(struct g_class *mp); 91 92struct g_class g_mirror_class = { 93 .name = G_MIRROR_CLASS_NAME, 94 .version = G_VERSION, 95 .ctlreq = g_mirror_config, 96 .taste = g_mirror_taste, 97 .destroy_geom = g_mirror_destroy_geom, 98 .init = g_mirror_init, 99 .fini = g_mirror_fini 100}; 101 102 103static void g_mirror_destroy_provider(struct g_mirror_softc *sc); 104static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state); 105static void g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force); 106static void g_mirror_dumpconf(struct sbuf *sb, const char *indent, 107 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp); 108static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type); 109static void g_mirror_register_request(struct bio *bp); 110static void g_mirror_sync_release(struct g_mirror_softc *sc); 111 112 113static const char * 114g_mirror_disk_state2str(int state) 115{ 116 117 switch (state) { 118 case G_MIRROR_DISK_STATE_NONE: 119 return ("NONE"); 120 case G_MIRROR_DISK_STATE_NEW: 121 return ("NEW"); 122 case G_MIRROR_DISK_STATE_ACTIVE: 123 return ("ACTIVE"); 124 case G_MIRROR_DISK_STATE_STALE: 125 return ("STALE"); 126 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 127 return ("SYNCHRONIZING"); 128 case G_MIRROR_DISK_STATE_DISCONNECTED: 129 return ("DISCONNECTED"); 130 case G_MIRROR_DISK_STATE_DESTROY: 131 return ("DESTROY"); 132 default: 133 return ("INVALID"); 134 } 135} 136 137static const char * 138g_mirror_device_state2str(int state) 139{ 140 141 switch (state) { 142 case G_MIRROR_DEVICE_STATE_STARTING: 143 return ("STARTING"); 144 case G_MIRROR_DEVICE_STATE_RUNNING: 145 return ("RUNNING"); 146 default: 147 return ("INVALID"); 148 } 149} 150 151static const char * 152g_mirror_get_diskname(struct g_mirror_disk *disk) 153{ 154 155 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL) 156 return ("[unknown]"); 157 return (disk->d_name); 158} 159 160/* 161 * --- Events handling functions --- 162 * Events in geom_mirror are used to maintain disks and device status 163 * from one thread to simplify locking. 
 */
static void
g_mirror_event_free(struct g_mirror_event *ep)
{

	free(ep, M_MIRROR);
}

int
g_mirror_event_send(void *arg, int state, int flags)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
		return (0);
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_mirror_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}

static struct g_mirror_event *
g_mirror_event_get(struct g_mirror_softc *sc)
{
	struct g_mirror_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}

static void
g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}

static void
g_mirror_event_cancel(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep, *tmpep;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_mirror_ndisks(struct g_mirror_softc *sc, int state)
{
	struct g_mirror_disk *disk;
	u_int n = 0;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (state == -1 || disk->d_state == state)
			n++;
	}
	return (n);
}

/*
 * Find a disk in the mirror by its disk ID.
 */
static struct g_mirror_disk *
g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
{
	struct g_mirror_disk *disk;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_id == id)
			return (disk);
	}
	return (NULL);
}

static u_int
g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

static int
g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_mirror_nrequests(sc, cp) > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_mirror_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_mirror_is_busy(sc, cp))
		return;
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event was sent (inside g_access()), we
		 * can send the event to detach and destroy the consumer.
		 * A class which has a consumer attached to the given provider
		 * will not receive a retaste event for that provider.
		 * This is how I ignore retaste events when I close consumers
		 * opened for write: I detach and destroy the consumer after
		 * the retaste event is sent.
		 */
		g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static int
g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
{
	struct g_consumer *cp;
	int error;

	g_topology_assert_not();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	g_topology_lock();
	cp = g_new_consumer(disk->d_softc->sc_geom);
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		g_topology_unlock();
		return (error);
	}
	error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_topology_unlock();
		G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}
	g_topology_unlock();
	disk->d_consumer = cp;
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;

	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
	return (0);
}

static void
g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL)
		g_mirror_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize a disk. This means allocate memory, create a consumer, attach
 * it to the provider and open access (r1w1e1) to it.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
	struct g_mirror_disk *disk;
	int error;

	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
	if (disk == NULL) {
		error = ENOMEM;
		goto fail;
	}
	disk->d_softc = sc;
	error = g_mirror_connect_disk(disk, pp);
	if (error != 0)
		goto fail;
	disk->d_id = md->md_did;
	disk->d_state = G_MIRROR_DISK_STATE_NONE;
	disk->d_priority = md->md_priority;
	disk->d_flags = md->md_dflags;
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
fail:
	if (errorp != NULL)
		*errorp = error;
	if (disk != NULL)
		free(disk, M_MIRROR);
	return (NULL);
}

static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_REMOVE(disk, d_next);
	g_mirror_event_cancel(disk);
	if (sc->sc_hint == disk)
		sc->sc_hint = NULL;
	switch (disk->d_state) {
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		g_mirror_sync_stop(disk, 1);
		/* FALLTHROUGH */
	case G_MIRROR_DISK_STATE_NEW:
	case G_MIRROR_DISK_STATE_STALE:
	case G_MIRROR_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		free(disk, M_MIRROR);
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
	}
}

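/*
 * Tear the whole device down: destroy the provider, all disks and pending
 * events, drain the timeout callout, wither both geoms and destroy the
 * per-device locks.
 */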
static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct g_geom *gp;
	struct g_consumer *cp, *tmpcp;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_mirror_destroy_provider(sc);
	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
	    disk = LIST_FIRST(&sc->sc_disks)) {
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
		g_mirror_destroy_disk(disk);
	}
	while ((ep = g_mirror_event_get(sc)) != NULL) {
		g_mirror_event_remove(sc, ep);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);

	g_topology_lock();
	LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
		g_mirror_disconnect_consumer(sc, cp);
	}
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	g_topology_unlock();
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	sx_xunlock(&sc->sc_lock);
	sx_destroy(&sc->sc_lock);
}

static void
g_mirror_orphan(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}

/*
 * Return the next active disk on the list.
 * It is possible that it will be the same disk as the given one.
 * If there are no active disks on the list, NULL is returned.
 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
	struct g_mirror_disk *dp;

	for (dp = LIST_NEXT(disk, d_next); dp != disk;
	    dp = LIST_NEXT(dp, d_next)) {
		if (dp == NULL)
			dp = LIST_FIRST(&sc->sc_disks);
		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
		return (NULL);
	return (dp);
}

static struct g_mirror_disk *
g_mirror_get_disk(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_hint == NULL) {
		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
		if (sc->sc_hint == NULL)
			return (NULL);
	}
	disk = sc->sc_hint;
	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
		disk = g_mirror_find_next(sc, disk);
		if (disk == NULL)
			return (NULL);
	}
	sc->sc_hint = g_mirror_find_next(sc, disk);
	return (disk);
}

static int
g_mirror_write_metadata(struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
(r%dw%de%d).", cp->provider->name, cp->acr, 633 cp->acw, cp->ace)); 634 length = cp->provider->sectorsize; 635 offset = cp->provider->mediasize - length; 636 sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO); 637 if (md != NULL) 638 mirror_metadata_encode(md, sector); 639 error = g_write_data(cp, offset, sector, length); 640 free(sector, M_MIRROR); 641 if (error != 0) { 642 if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) { 643 disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN; 644 G_MIRROR_DEBUG(0, "Cannot write metadata on %s " 645 "(device=%s, error=%d).", 646 g_mirror_get_diskname(disk), sc->sc_name, error); 647 } else { 648 G_MIRROR_DEBUG(1, "Cannot write metadata on %s " 649 "(device=%s, error=%d).", 650 g_mirror_get_diskname(disk), sc->sc_name, error); 651 } 652 if (g_mirror_disconnect_on_failure && 653 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) { 654 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 655 g_mirror_event_send(disk, 656 G_MIRROR_DISK_STATE_DISCONNECTED, 657 G_MIRROR_EVENT_DONTWAIT); 658 } 659 } 660 return (error); 661} 662 663static int 664g_mirror_clear_metadata(struct g_mirror_disk *disk) 665{ 666 int error; 667 668 g_topology_assert_not(); 669 sx_assert(&disk->d_softc->sc_lock, SX_LOCKED); 670 671 error = g_mirror_write_metadata(disk, NULL); 672 if (error == 0) { 673 G_MIRROR_DEBUG(2, "Metadata on %s cleared.", 674 g_mirror_get_diskname(disk)); 675 } else { 676 G_MIRROR_DEBUG(0, 677 "Cannot clear metadata on disk %s (error=%d).", 678 g_mirror_get_diskname(disk), error); 679 } 680 return (error); 681} 682 683void 684g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk, 685 struct g_mirror_metadata *md) 686{ 687 688 strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic)); 689 md->md_version = G_MIRROR_VERSION; 690 strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name)); 691 md->md_mid = sc->sc_id; 692 md->md_all = sc->sc_ndisks; 693 md->md_slice = sc->sc_slice; 694 md->md_balance = sc->sc_balance; 695 md->md_genid = sc->sc_genid; 696 md->md_mediasize = sc->sc_mediasize; 697 md->md_sectorsize = sc->sc_sectorsize; 698 md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK); 699 bzero(md->md_provider, sizeof(md->md_provider)); 700 if (disk == NULL) { 701 md->md_did = arc4random(); 702 md->md_priority = 0; 703 md->md_syncid = 0; 704 md->md_dflags = 0; 705 md->md_sync_offset = 0; 706 md->md_provsize = 0; 707 } else { 708 md->md_did = disk->d_id; 709 md->md_priority = disk->d_priority; 710 md->md_syncid = disk->d_sync.ds_syncid; 711 md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK); 712 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 713 md->md_sync_offset = disk->d_sync.ds_offset_done; 714 else 715 md->md_sync_offset = 0; 716 if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) { 717 strlcpy(md->md_provider, 718 disk->d_consumer->provider->name, 719 sizeof(md->md_provider)); 720 } 721 md->md_provsize = disk->d_consumer->provider->mediasize; 722 } 723} 724 725void 726g_mirror_update_metadata(struct g_mirror_disk *disk) 727{ 728 struct g_mirror_softc *sc; 729 struct g_mirror_metadata md; 730 int error; 731 732 g_topology_assert_not(); 733 sc = disk->d_softc; 734 sx_assert(&sc->sc_lock, SX_LOCKED); 735 736 g_mirror_fill_metadata(sc, disk, &md); 737 error = g_mirror_write_metadata(disk, &md); 738 if (error == 0) { 739 G_MIRROR_DEBUG(2, "Metadata on %s updated.", 740 g_mirror_get_diskname(disk)); 741 } else { 742 G_MIRROR_DEBUG(0, 743 "Cannot update metadata on disk %s (error=%d).", 744 g_mirror_get_diskname(disk), 
void
g_mirror_update_metadata(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_metadata md;
	int error;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_mirror_fill_metadata(sc, disk, &md);
	error = g_mirror_write_metadata(disk, &md);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s updated.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot update metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
}

static void
g_mirror_bump_syncid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_mirror_update_metadata(disk);
		}
	}
}

static void
g_mirror_bump_genid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_mirror_update_metadata(disk);
		}
	}
}

static int
g_mirror_idle(struct g_mirror_softc *sc, int acw)
{
	struct g_mirror_disk *disk;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write);
		if (timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
	return (0);
}

static void
g_mirror_unidle(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
}

static void
g_mirror_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_regular_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	pbp = bp->bio_parent;
	sc = pbp->bio_to->geom->softc;
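	/*
	 * cp->index counts the requests in flight on each consumer; it is
	 * incremented when a request is issued and decremented here on
	 * completion, so g_mirror_is_busy() can tell when a consumer may
	 * safely be destroyed.
	 */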
	bp->bio_from->index--;
	if (bp->bio_cmd == BIO_WRITE)
		sc->sc_writes--;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
	}

	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (bp->bio_error == 0 && pbp->bio_error == 0) {
		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
		g_destroy_bio(bp);
		if (pbp->bio_children == pbp->bio_inbed) {
			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
			pbp->bio_completed = pbp->bio_length;
			if (pbp->bio_cmd == BIO_WRITE) {
				bioq_remove(&sc->sc_inflight, pbp);
				/* Release delayed sync requests if possible. */
				g_mirror_sync_release(sc);
			}
			g_io_deliver(pbp, pbp->bio_error);
		}
		return;
	} else if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		if (disk != NULL) {
			if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
				disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
				G_MIRROR_LOGREQ(0, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			} else {
				G_MIRROR_LOGREQ(1, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			}
			if (g_mirror_disconnect_on_failure &&
			    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1)
			{
				sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
				g_mirror_event_send(disk,
				    G_MIRROR_DISK_STATE_DISCONNECTED,
				    G_MIRROR_EVENT_DONTWAIT);
			}
		}
		switch (pbp->bio_cmd) {
		case BIO_DELETE:
		case BIO_WRITE:
			pbp->bio_inbed--;
			pbp->bio_children--;
			break;
		}
	}
	g_destroy_bio(bp);

	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (pbp->bio_inbed < pbp->bio_children)
			break;
		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1)
			g_io_deliver(pbp, pbp->bio_error);
		else {
			pbp->bio_error = 0;
			mtx_lock(&sc->sc_queue_mtx);
			bioq_disksort(&sc->sc_queue, pbp);
			mtx_unlock(&sc->sc_queue_mtx);
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
			wakeup(sc);
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		if (pbp->bio_children == 0) {
			/*
			 * All requests failed.
			 */
		} else if (pbp->bio_inbed < pbp->bio_children) {
			/* Do nothing. */
			break;
		} else if (pbp->bio_children == pbp->bio_inbed) {
			/* Some requests succeeded. */
			pbp->bio_error = 0;
			pbp->bio_completed = pbp->bio_length;
		}
		bioq_remove(&sc->sc_inflight, pbp);
		/* Release delayed sync requests if possible. */
		g_mirror_sync_release(sc);
		g_io_deliver(pbp, pbp->bio_error);
		break;
	default:
		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
		break;
	}
}

static void
g_mirror_sync_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_kernel_dump(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *cbp;
	struct g_kerneldump *gkd;

	/*
	 * We configure dumping to the first component, because this component
	 * will be used for reading with the 'prefer' balance algorithm.
	 * If the component with the highest priority is currently disconnected
	 * we will not be able to read the dump after the reboot if it is
	 * connected and synchronized later. Can we do something better?
	 */
	sc = bp->bio_to->geom->softc;
	disk = LIST_FIRST(&sc->sc_disks);

	gkd = (struct g_kerneldump *)bp->bio_data;
	if (gkd->length > bp->bio_to->mediasize)
		gkd->length = bp->bio_to->mediasize;
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	g_io_request(cbp, disk->d_consumer);
	G_MIRROR_DEBUG(1, "Kernel dump will go to %s.",
	    g_mirror_get_diskname(disk));
}

static void
g_mirror_flush(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_std_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
	}
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_MIRROR_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_FLUSH:
		g_mirror_flush(sc, bp);
		return;
	case BIO_GETATTR:
		if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) {
			g_mirror_kernel_dump(bp);
			return;
		}
		/* FALLTHROUGH */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
}

/*
 * Return TRUE if the given request is colliding with an in-progress
 * synchronization request.
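 * Two requests collide when their [offset, offset + length) ranges overlap.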
 */
static int
g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	int i;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING)
			continue;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			sbp = disk->d_sync.ds_bios[i];
			if (sbp == NULL)
				continue;
			sstart = sbp->bio_offset;
			send = sbp->bio_offset + sbp->bio_length;
			if (rend > sstart && rstart < send)
				return (1);
		}
	}
	return (0);
}

/*
 * Return TRUE if the given sync request is colliding with an in-progress
 * regular request.
 */
static int
g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	sstart = sbp->bio_offset;
	send = sbp->bio_offset + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}

/*
 * Put the request onto the delayed queue.
 */
static void
g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying request.");
	bioq_insert_head(&sc->sc_regular_delayed, bp);
}

/*
 * Put the synchronization request onto the delayed queue.
 */
static void
g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request.");
	bioq_insert_tail(&sc->sc_sync_delayed, bp);
}

/*
 * Release delayed regular requests which don't collide anymore with sync
 * requests.
 */
static void
g_mirror_regular_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
		if (g_mirror_sync_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_regular_delayed, bp);
		G_MIRROR_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_head(&sc->sc_queue, bp);
#if 0
		/*
		 * wakeup() is not needed, because this function is called from
		 * the worker thread.
		 */
		wakeup(&sc->sc_queue);
#endif
		mtx_unlock(&sc->sc_queue_mtx);
	}
}

/*
 * Release delayed sync requests which don't collide anymore with regular
 * requests.
 */
static void
g_mirror_sync_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
		if (g_mirror_regular_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_sync_delayed, bp);
		G_MIRROR_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a READ request
 * is sent to the active provider and then a WRITE request (with the read
 * data) to the provider being synchronized.  When the WRITE is finished, a
 * new synchronization request is sent.
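 * Requests that collide with in-flight regular I/O are parked on the
 * sc_sync_delayed queue and re-issued by g_mirror_sync_release() once the
 * colliding regular request completes.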
 */
static void
g_mirror_sync_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;

	bp->bio_from->index--;
	sc = bp->bio_from->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		free(bp->bio_data, M_MIRROR);
		g_destroy_bio(bp);
		sx_xlock(&sc->sc_lock);
		return;
	}

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
	    {
		struct g_consumer *cp;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			return;
		}
		G_MIRROR_LOGREQ(3, bp,
		    "Synchronization request half-finished.");
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	    }
	case BIO_WRITE:
	    {
		struct g_mirror_disk_sync *sync;
		off_t offset;
		void *data;
		int i;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
		sync = &disk->d_sync;
		if (sync->ds_offset == sc->sc_mediasize ||
		    sync->ds_consumer == NULL ||
		    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
			/* Don't send more synchronization requests. */
			sync->ds_inflight--;
			if (sync->ds_bios != NULL) {
				i = (int)(uintptr_t)bp->bio_caller1;
				sync->ds_bios[i] = NULL;
			}
			free(bp->bio_data, M_MIRROR);
			g_destroy_bio(bp);
			if (sync->ds_inflight > 0)
				return;
			if (sync->ds_consumer == NULL ||
			    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				return;
			}
			/* Disk up-to-date, activate it. */
			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}

		/* Send next synchronization request. */
		data = bp->bio_data;
		bzero(bp, sizeof(*bp));
		bp->bio_cmd = BIO_READ;
		bp->bio_offset = sync->ds_offset;
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		sync->ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_data = data;
		bp->bio_from = sync->ds_consumer;
		bp->bio_to = sc->sc_provider;
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);

		/* Release delayed requests if possible. */
		g_mirror_regular_release(sc);

		/* Find the smallest offset. */
		offset = sc->sc_mediasize;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			bp = sync->ds_bios[i];
			if (bp->bio_offset < offset)
				offset = bp->bio_offset;
		}
		if (sync->ds_offset_done + (MAXPHYS * 100) < offset) {
			/* Update offset_done on every 100 blocks. */
			sync->ds_offset_done = offset;
			g_mirror_update_metadata(disk);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

static void
g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

static void
g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	disk = g_mirror_get_disk(sc);
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

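/*
 * The 'load' balance algorithm keeps a per-disk load estimate: an
 * exponentially weighted moving average of the consumer's in-flight request
 * count, scaled by LOAD_SCALE.  A disk whose last known head position matches
 * the request offset (or is within TRACK_SIZE of it) gets a priority bonus.
 */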
#define	TRACK_SIZE	(1 * 1024 * 1024)
#define	LOAD_SCALE	256
#define	ABS(x)		(((x) >= 0) ? (x) : (-(x)))

static void
g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk, *dp;
	struct g_consumer *cp;
	struct bio *cbp;
	int prio, best;

	/* Find a disk with the smallest load. */
	disk = NULL;
	best = INT_MAX;
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		prio = dp->load;
		/* If disk head is precisely in position - highly prefer it. */
		if (dp->d_last_offset == bp->bio_offset)
			prio -= 2 * LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE)
			prio -= 1 * LOAD_SCALE;
		if (prio <= best) {
			disk = dp;
			best = prio;
		}
	}
	KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name));
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	/* Remember the last head position. */
	disk->d_last_offset = bp->bio_offset + bp->bio_length;
	/* Update loads. */
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		dp->load = (dp->d_consumer->index * LOAD_SCALE +
		    dp->load * 7) / 8;
	}
	g_io_request(cbp, cp);
}

static void
g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	off_t left, mod, offset, slice;
	u_char *data;
	u_int ndisks;

	if (bp->bio_length <= sc->sc_slice) {
		g_mirror_request_round_robin(sc, bp);
		return;
	}
	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
	slice = bp->bio_length / ndisks;
	mod = slice % sc->sc_provider->sectorsize;
	if (mod != 0)
		slice += sc->sc_provider->sectorsize - mod;
	/*
	 * Allocate all bios before sending any request, so we can return
	 * ENOMEM in a nice and clean way.
	 */
	left = bp->bio_length;
	offset = bp->bio_offset;
	data = bp->bio_data;
	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_mirror_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
		cbp->bio_offset = offset;
		cbp->bio_data = data;
		cbp->bio_length = MIN(left, slice);
		left -= cbp->bio_length;
		if (left == 0)
			break;
		offset += cbp->bio_length;
		data += cbp->bio_length;
	}
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		disk->d_consumer->index++;
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_mirror_register_request(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	switch (bp->bio_cmd) {
	case BIO_READ:
		switch (sc->sc_balance) {
		case G_MIRROR_BALANCE_LOAD:
			g_mirror_request_load(sc, bp);
			break;
		case G_MIRROR_BALANCE_PREFER:
			g_mirror_request_prefer(sc, bp);
			break;
		case G_MIRROR_BALANCE_ROUND_ROBIN:
			g_mirror_request_round_robin(sc, bp);
			break;
		case G_MIRROR_BALANCE_SPLIT:
			g_mirror_request_split(sc, bp);
			break;
		}
		return;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		struct g_mirror_disk *disk;
		struct g_mirror_disk_sync *sync;
		struct bio_queue_head queue;
		struct g_consumer *cp;
		struct bio *cbp;

		/*
		 * Delay the request if it is colliding with a synchronization
		 * request.
		 */
		if (g_mirror_sync_collision(sc, bp)) {
			g_mirror_regular_delay(sc, bp);
			return;
		}

		if (sc->sc_idle)
			g_mirror_unidle(sc);
		else
			sc->sc_last_write = time_uptime;

		/*
		 * Allocate all bios before sending any request, so we can
		 * return ENOMEM in a nice and clean way.
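		 * A clone is made for every ACTIVE disk and for every
		 * SYNCHRONIZING disk whose already-synchronized region
		 * (below ds_offset) covers the request.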
		 */
		bioq_init(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			sync = &disk->d_sync;
			switch (disk->d_state) {
			case G_MIRROR_DISK_STATE_ACTIVE:
				break;
			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
				if (bp->bio_offset >= sync->ds_offset)
					continue;
				break;
			default:
				continue;
			}
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				for (cbp = bioq_first(&queue); cbp != NULL;
				    cbp = bioq_first(&queue)) {
					bioq_remove(&queue, cbp);
					g_destroy_bio(cbp);
				}
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			bioq_insert_tail(&queue, cbp);
			cbp->bio_done = g_mirror_done;
			cp = disk->d_consumer;
			cbp->bio_caller1 = cp;
			cbp->bio_to = cp->provider;
			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
			    ("Consumer %s not opened (r%dw%de%d).",
			    cp->provider->name, cp->acr, cp->acw, cp->ace));
		}
		for (cbp = bioq_first(&queue); cbp != NULL;
		    cbp = bioq_first(&queue)) {
			bioq_remove(&queue, cbp);
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			cp = cbp->bio_caller1;
			cbp->bio_caller1 = NULL;
			cp->index++;
			sc->sc_writes++;
			g_io_request(cbp, cp);
		}
		/*
		 * Put the request onto the inflight queue, so we can check
		 * that new synchronization requests don't collide with it.
		 */
		bioq_insert_tail(&sc->sc_inflight, bp);
		/*
		 * Bump syncid on first write.
		 */
		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) {
			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
			g_mirror_bump_syncid(sc);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

static int
g_mirror_can_destroy(struct g_mirror_softc *sc)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	gp = sc->sc_geom;
	if (gp->softc == NULL)
		return (1);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_TASTING) != 0)
		return (0);
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	gp = sc->sc_sync.ds_geom;
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
	    sc->sc_name);
	return (1);
}

static int
g_mirror_try_destroy(struct g_mirror_softc *sc)
{

	if (sc->sc_rootmount != NULL) {
		G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
		    sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}
	g_topology_lock();
	if (!g_mirror_can_destroy(sc)) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) {
		g_topology_unlock();
		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
		    &sc->sc_worker);
		/* Unlock sc_lock here, as it can be destroyed after wakeup. */
		sx_xunlock(&sc->sc_lock);
		wakeup(&sc->sc_worker);
		sc->sc_worker = NULL;
	} else {
		g_topology_unlock();
		g_mirror_destroy_device(sc);
		free(sc, M_MIRROR);
	}
	return (1);
}

/*
 * Worker thread.
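 * Processes queued events first, then regular and synchronization I/O
 * requests; it also drives device destruction and idle-time marking of
 * components as clean.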
 */
static void
g_mirror_worker(void *arg)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep;
	struct bio *bp;
	int timeout;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sx_xlock(&sc->sc_lock);
	for (;;) {
		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
		/*
		 * First take a look at events.
		 * It is important to handle events before any I/O requests.
		 */
		ep = g_mirror_event_get(sc);
		if (ep != NULL) {
			g_mirror_event_remove(sc, ep);
			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
				/* Update only device status. */
				G_MIRROR_DEBUG(3,
				    "Running event for device %s.",
				    sc->sc_name);
				ep->e_error = 0;
				g_mirror_update_device(sc, 1);
			} else {
				/* Update disk status. */
				G_MIRROR_DEBUG(3, "Running event for disk %s.",
				    g_mirror_get_diskname(ep->e_disk));
				ep->e_error = g_mirror_update_disk(ep->e_disk,
				    ep->e_state);
				if (ep->e_error == 0)
					g_mirror_update_device(sc, 0);
			}
			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
				KASSERT(ep->e_error == 0,
				    ("Error cannot be handled."));
				g_mirror_event_free(ep);
			} else {
				ep->e_flags |= G_MIRROR_EVENT_DONE;
				G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
				    ep);
				mtx_lock(&sc->sc_events_mtx);
				wakeup(ep);
				mtx_unlock(&sc->sc_events_mtx);
			}
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
			}
			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
			continue;
		}
		/*
		 * Check if we can mark the array as CLEAN and, if we can't,
		 * how many seconds we should wait.
		 */
		timeout = g_mirror_idle(sc, -1);
		/*
		 * Now I/O requests.
		 */
		/* Get the first request from the queue. */
		mtx_lock(&sc->sc_queue_mtx);
		bp = bioq_first(&sc->sc_queue);
		if (bp == NULL) {
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				mtx_unlock(&sc->sc_queue_mtx);
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
				mtx_lock(&sc->sc_queue_mtx);
			}
			sx_xunlock(&sc->sc_lock);
			/*
			 * XXX: We can miss an event here, because an event
			 * can be added without the sx-device-lock and without
			 * the mtx-queue-lock. Maybe I should just stop using a
			 * dedicated mutex for events synchronization and
			 * stick with the queue lock?
			 * The event will hang here until the next I/O request
			 * or the next event is received.
			 */
			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1",
			    timeout * hz);
			sx_xlock(&sc->sc_lock);
			G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
			continue;
		}
		bioq_remove(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
		    (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
			g_mirror_sync_request(bp);	/* READ */
		} else if (bp->bio_to != sc->sc_provider) {
			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0)
				g_mirror_regular_request(bp);
			else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
				g_mirror_sync_request(bp);	/* WRITE */
			else {
				KASSERT(0,
				    ("Invalid request cflags=0x%hhx to=%s.",
				    bp->bio_cflags, bp->bio_to->name));
			}
		} else {
			g_mirror_register_request(bp);
		}
		G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__);
	}
}

static void
g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{

	sx_assert(&sc->sc_lock, SX_LOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	} else if (sc->sc_idle &&
	    (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	}
}

static void
g_mirror_sync_start(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	struct bio *bp;
	int error, i;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Disk %s is not marked for synchronization.",
	    g_mirror_get_diskname(disk)));
	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
	    sc->sc_state));

	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	cp = g_new_consumer(sc->sc_sync.ds_geom);
	error = g_attach(cp, sc->sc_provider);
	KASSERT(error == 0,
	    ("Cannot attach to %s (error=%d).", sc->sc_name, error));
	error = g_access(cp, 1, 0, 0);
	KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);

	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
	    g_mirror_get_diskname(disk));
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0)
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	KASSERT(disk->d_sync.ds_consumer == NULL,
	    ("Sync consumer already exists (device=%s, disk=%s).",
	    sc->sc_name, g_mirror_get_diskname(disk)));

	disk->d_sync.ds_consumer = cp;
	disk->d_sync.ds_consumer->private = disk;
	disk->d_sync.ds_consumer->index = 0;

	/*
	 * Allocate memory for synchronization bios and initialize them.
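	 * Each bio occupies a fixed slot in ds_bios; its slot index is
	 * stashed in bio_caller1 so g_mirror_sync_request() can clear the
	 * slot when the request completes.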
	 */
	disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs,
	    M_MIRROR, M_WAITOK);
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = g_alloc_bio();
		disk->d_sync.ds_bios[i] = bp;
		bp->bio_parent = NULL;
		bp->bio_cmd = BIO_READ;
		bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK);
		bp->bio_cflags = 0;
		bp->bio_offset = disk->d_sync.ds_offset;
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		disk->d_sync.ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_from = disk->d_sync.ds_consumer;
		bp->bio_to = sc->sc_provider;
		bp->bio_caller1 = (void *)(uintptr_t)i;
	}

	/* Increase the number of disks in SYNCHRONIZING state. */
	sc->sc_sync.ds_ndisks++;
	/* Set the number of in-flight synchronization requests. */
	disk->d_sync.ds_inflight = g_mirror_syncreqs;

	/*
	 * Fire off the first synchronization requests.
	 */
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = disk->d_sync.ds_bios[i];
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		disk->d_sync.ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, disk->d_sync.ds_consumer);
	}
}

/*
 * Stop the synchronization process.
 * type: 0 - synchronization finished
 *       1 - synchronization stopped
 */
static void
g_mirror_sync_stop(struct g_mirror_disk *disk, int type)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
	    g_mirror_disk_state2str(disk->d_state)));
	if (disk->d_sync.ds_consumer == NULL)
		return;

	if (type == 0) {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.",
		    sc->sc_name, g_mirror_get_diskname(disk));
	} else /* if (type == 1) */ {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
		    sc->sc_name, g_mirror_get_diskname(disk));
	}
	free(disk->d_sync.ds_bios, M_MIRROR);
	disk->d_sync.ds_bios = NULL;
	cp = disk->d_sync.ds_consumer;
	disk->d_sync.ds_consumer = NULL;
	disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	sc->sc_sync.ds_ndisks--;
	sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
*/ 2011	g_topology_lock(); 2012	g_mirror_kill_consumer(sc, cp); 2013	g_topology_unlock(); 2014	sx_xlock(&sc->sc_lock); 2015} 2016 2017static void 2018g_mirror_launch_provider(struct g_mirror_softc *sc) 2019{ 2020	struct g_mirror_disk *disk; 2021	struct g_provider *pp; 2022 2023	sx_assert(&sc->sc_lock, SX_LOCKED); 2024 2025	g_topology_lock(); 2026	pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name); 2027	pp->mediasize = sc->sc_mediasize; 2028	pp->sectorsize = sc->sc_sectorsize; 2029	pp->stripesize = 0; 2030	pp->stripeoffset = 0; 2031	LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2032		if (disk->d_consumer && disk->d_consumer->provider && 2033		    disk->d_consumer->provider->stripesize > pp->stripesize) { 2034			pp->stripesize = disk->d_consumer->provider->stripesize; 2035			pp->stripeoffset = disk->d_consumer->provider->stripeoffset; 2036		} 2037	} 2038	sc->sc_provider = pp; 2039	g_error_provider(pp, 0); 2040	g_topology_unlock(); 2041	G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name, 2042	    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks); 2043	LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2044		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2045			g_mirror_sync_start(disk); 2046	} 2047} 2048 2049static void 2050g_mirror_destroy_provider(struct g_mirror_softc *sc) 2051{ 2052	struct g_mirror_disk *disk; 2053	struct bio *bp; 2054 2055	g_topology_assert_not(); 2056	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).", 2057	    sc->sc_name)); 2058 2059	g_topology_lock(); 2060	g_error_provider(sc->sc_provider, ENXIO); 2061	mtx_lock(&sc->sc_queue_mtx); 2062	while ((bp = bioq_first(&sc->sc_queue)) != NULL) { 2063		bioq_remove(&sc->sc_queue, bp); 2064		g_io_deliver(bp, ENXIO); 2065	} 2066	mtx_unlock(&sc->sc_queue_mtx); 2067	G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name, 2068	    sc->sc_provider->name); 2069	sc->sc_provider->flags |= G_PF_WITHER; 2070	g_orphan_provider(sc->sc_provider, ENXIO); 2071	g_topology_unlock(); 2072	sc->sc_provider = NULL; 2073	LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2074		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2075			g_mirror_sync_stop(disk, 1); 2076	} 2077} 2078 2079static void 2080g_mirror_go(void *arg) 2081{ 2082	struct g_mirror_softc *sc; 2083 2084	sc = arg; 2085	G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name); 2086	g_mirror_event_send(sc, 0, 2087	    G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE); 2088} 2089 2090static u_int 2091g_mirror_determine_state(struct g_mirror_disk *disk) 2092{ 2093	struct g_mirror_softc *sc; 2094	u_int state; 2095 2096	sc = disk->d_softc; 2097	if (sc->sc_syncid == disk->d_sync.ds_syncid) { 2098		if ((disk->d_flags & 2099		    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { 2100			/* Disk does not need synchronization. */ 2101			state = G_MIRROR_DISK_STATE_ACTIVE; 2102		} else { 2103			if ((sc->sc_flags & 2104			     G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2105			    (disk->d_flags & 2106			     G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2107				/* 2108				 * We can start synchronization from 2109				 * the stored offset. 2110				 */ 2111				state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2112			} else { 2113				state = G_MIRROR_DISK_STATE_STALE; 2114			} 2115		} 2116	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) { 2117		/* 2118		 * Reset all synchronization data for this disk, 2119		 * because even if it was synchronized, it was 2120		 * synchronized to disks with a different syncid.
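 * A lower syncid means the disk missed at least one syncid bump, so its * contents can no longer be trusted.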
2121		 */ 2122		disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2123		disk->d_sync.ds_offset = 0; 2124		disk->d_sync.ds_offset_done = 0; 2125		disk->d_sync.ds_syncid = sc->sc_syncid; 2126		if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2127		    (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2128			state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2129		} else { 2130			state = G_MIRROR_DISK_STATE_STALE; 2131		} 2132	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ { 2133		/* 2134		 * Not good, NOT GOOD! 2135		 * It means that the mirror was started on stale disks 2136		 * and a fresher disk has just arrived. 2137		 * If there were writes, the mirror is broken, sorry. 2138		 * The best choice here is not to touch this disk 2139		 * and to inform the user loudly. 2140		 */ 2141		G_MIRROR_DEBUG(0, "Device %s was started before the freshest " 2142		    "disk (%s) arrived! It will not be connected to the " 2143		    "running device.", sc->sc_name, 2144		    g_mirror_get_diskname(disk)); 2145		g_mirror_destroy_disk(disk); 2146		state = G_MIRROR_DISK_STATE_NONE; 2147		/* Return immediately, because disk was destroyed. */ 2148		return (state); 2149	} 2150	G_MIRROR_DEBUG(3, "State for %s disk: %s.", 2151	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(state)); 2152	return (state); 2153} 2154 2155/* 2156 * Update device state. 2157 */ 2158static void 2159g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force) 2160{ 2161	struct g_mirror_disk *disk; 2162	u_int state; 2163 2164	sx_assert(&sc->sc_lock, SX_XLOCKED); 2165 2166	switch (sc->sc_state) { 2167	case G_MIRROR_DEVICE_STATE_STARTING: 2168	    { 2169		struct g_mirror_disk *pdisk, *tdisk; 2170		u_int dirty, ndisks, genid, syncid; 2171 2172		KASSERT(sc->sc_provider == NULL, 2173		    ("Non-NULL provider in STARTING state (%s).", sc->sc_name)); 2174		/* 2175		 * Are we ready? We are, if all disks are connected or 2176		 * if we have any disks and 'force' is true. 2177		 */ 2178		ndisks = g_mirror_ndisks(sc, -1); 2179		if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) { 2180			; 2181		} else if (ndisks == 0) { 2182			/* 2183			 * Disks went down in starting phase, so destroy 2184			 * device. 2185			 */ 2186			callout_drain(&sc->sc_callout); 2187			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2188			G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 2189			    sc->sc_rootmount); 2190			root_mount_rel(sc->sc_rootmount); 2191			sc->sc_rootmount = NULL; 2192			return; 2193		} else { 2194			return; 2195		} 2196 2197		/* 2198		 * Activate all disks with the biggest syncid. 2199		 */ 2200		if (force) { 2201			/* 2202			 * If 'force' is true, we have been called due to 2203			 * timeout, so don't bother canceling timeout. 2204			 */ 2205			ndisks = 0; 2206			LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2207				if ((disk->d_flags & 2208				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { 2209					ndisks++; 2210				} 2211			} 2212			if (ndisks == 0) { 2213				/* No valid disks found, destroy device. */ 2214				sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2215				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2216				    __LINE__, sc->sc_rootmount); 2217				root_mount_rel(sc->sc_rootmount); 2218				sc->sc_rootmount = NULL; 2219				return; 2220			} 2221		} else { 2222			/* Cancel timeout. */ 2223			callout_drain(&sc->sc_callout); 2224		} 2225 2226		/* 2227		 * Find the biggest genid. 2228		 */ 2229		genid = 0; 2230		LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2231			if (disk->d_genid > genid) 2232				genid = disk->d_genid; 2233		} 2234		sc->sc_genid = genid; 2235		/* 2236		 * Remove all disks without the biggest genid.
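		 * They are reported as broken components; 		 * g_mirror_add_disk() applies the same genid check to 		 * components that show up after the device is running.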
2237 */ 2238 LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) { 2239 if (disk->d_genid < genid) { 2240 G_MIRROR_DEBUG(0, 2241 "Component %s (device %s) broken, skipping.", 2242 g_mirror_get_diskname(disk), sc->sc_name); 2243 g_mirror_destroy_disk(disk); 2244 } 2245 } 2246 2247 /* 2248 * Find the biggest syncid. 2249 */ 2250 syncid = 0; 2251 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2252 if (disk->d_sync.ds_syncid > syncid) 2253 syncid = disk->d_sync.ds_syncid; 2254 } 2255 2256 /* 2257 * Here we need to look for dirty disks and if all disks 2258 * with the biggest syncid are dirty, we have to choose 2259 * one with the biggest priority and rebuild the rest. 2260 */ 2261 /* 2262 * Find the number of dirty disks with the biggest syncid. 2263 * Find the number of disks with the biggest syncid. 2264 * While here, find a disk with the biggest priority. 2265 */ 2266 dirty = ndisks = 0; 2267 pdisk = NULL; 2268 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2269 if (disk->d_sync.ds_syncid != syncid) 2270 continue; 2271 if ((disk->d_flags & 2272 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2273 continue; 2274 } 2275 ndisks++; 2276 if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 2277 dirty++; 2278 if (pdisk == NULL || 2279 pdisk->d_priority < disk->d_priority) { 2280 pdisk = disk; 2281 } 2282 } 2283 } 2284 if (dirty == 0) { 2285 /* No dirty disks at all, great. */ 2286 } else if (dirty == ndisks) { 2287 /* 2288 * Force synchronization for all dirty disks except one 2289 * with the biggest priority. 2290 */ 2291 KASSERT(pdisk != NULL, ("pdisk == NULL")); 2292 G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a " 2293 "master disk for synchronization.", 2294 g_mirror_get_diskname(pdisk), sc->sc_name); 2295 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2296 if (disk->d_sync.ds_syncid != syncid) 2297 continue; 2298 if ((disk->d_flags & 2299 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2300 continue; 2301 } 2302 KASSERT((disk->d_flags & 2303 G_MIRROR_DISK_FLAG_DIRTY) != 0, 2304 ("Disk %s isn't marked as dirty.", 2305 g_mirror_get_diskname(disk))); 2306 /* Skip the disk with the biggest priority. */ 2307 if (disk == pdisk) 2308 continue; 2309 disk->d_sync.ds_syncid = 0; 2310 } 2311 } else if (dirty < ndisks) { 2312 /* 2313 * Force synchronization for all dirty disks. 2314 * We have some non-dirty disks. 2315 */ 2316 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2317 if (disk->d_sync.ds_syncid != syncid) 2318 continue; 2319 if ((disk->d_flags & 2320 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2321 continue; 2322 } 2323 if ((disk->d_flags & 2324 G_MIRROR_DISK_FLAG_DIRTY) == 0) { 2325 continue; 2326 } 2327 disk->d_sync.ds_syncid = 0; 2328 } 2329 } 2330 2331 /* Reset hint. */ 2332 sc->sc_hint = NULL; 2333 sc->sc_syncid = syncid; 2334 if (force) { 2335 /* Remember to bump syncid on first write. 
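This way, components that were absent during the forced start are recognized as outdated if they show up later.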
*/ 2336			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2337		} 2338		state = G_MIRROR_DEVICE_STATE_RUNNING; 2339		G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.", 2340		    sc->sc_name, g_mirror_device_state2str(sc->sc_state), 2341		    g_mirror_device_state2str(state)); 2342		sc->sc_state = state; 2343		LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2344			state = g_mirror_determine_state(disk); 2345			g_mirror_event_send(disk, state, 2346			    G_MIRROR_EVENT_DONTWAIT); 2347			if (state == G_MIRROR_DISK_STATE_STALE) 2348				sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2349		} 2350		break; 2351	    } 2352	case G_MIRROR_DEVICE_STATE_RUNNING: 2353		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 && 2354		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) { 2355			/* 2356			 * No active disks or no disks at all, 2357			 * so destroy device. 2358			 */ 2359			if (sc->sc_provider != NULL) 2360				g_mirror_destroy_provider(sc); 2361			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2362			break; 2363		} else if (g_mirror_ndisks(sc, 2364		    G_MIRROR_DISK_STATE_ACTIVE) > 0 && 2365		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) { 2366			/* 2367			 * We have active disks, launch provider if it doesn't 2368			 * exist. 2369			 */ 2370			if (sc->sc_provider == NULL) 2371				g_mirror_launch_provider(sc); 2372			if (sc->sc_rootmount != NULL) { 2373				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2374				    __LINE__, sc->sc_rootmount); 2375				root_mount_rel(sc->sc_rootmount); 2376				sc->sc_rootmount = NULL; 2377			} 2378		} 2379		/* 2380		 * Genid should be bumped immediately, so do it here. 2381		 */ 2382		if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) { 2383			sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID; 2384			g_mirror_bump_genid(sc); 2385		} 2386		break; 2387	default: 2388		KASSERT(1 == 0, ("Wrong device state (%s, %s).", 2389		    sc->sc_name, g_mirror_device_state2str(sc->sc_state))); 2390		break; 2391	} 2392} 2393 2394/* 2395 * Update disk state and device state if needed. 2396 */ 2397#define	DISK_STATE_CHANGED()	G_MIRROR_DEBUG(1,			\ 2398	"Disk %s state changed from %s to %s (device %s).",		\ 2399	g_mirror_get_diskname(disk),					\ 2400	g_mirror_disk_state2str(disk->d_state),				\ 2401	g_mirror_disk_state2str(state), sc->sc_name) 2402static int 2403g_mirror_update_disk(struct g_mirror_disk *disk, u_int state) 2404{ 2405	struct g_mirror_softc *sc; 2406 2407	sc = disk->d_softc; 2408	sx_assert(&sc->sc_lock, SX_XLOCKED); 2409 2410again: 2411	G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.", 2412	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state), 2413	    g_mirror_disk_state2str(state)); 2414	switch (state) { 2415	case G_MIRROR_DISK_STATE_NEW: 2416		/* 2417		 * Possible scenarios: 2418		 * 1. New disk arrives. 2419		 */ 2420		/* Previous state should be NONE.
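The disk is then linked into sc_disks so that the list stays sorted by d_priority, from highest to lowest.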
*/ 2421 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE, 2422 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2423 g_mirror_disk_state2str(disk->d_state))); 2424 DISK_STATE_CHANGED(); 2425 2426 disk->d_state = state; 2427 if (LIST_EMPTY(&sc->sc_disks)) 2428 LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next); 2429 else { 2430 struct g_mirror_disk *dp; 2431 2432 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 2433 if (disk->d_priority >= dp->d_priority) { 2434 LIST_INSERT_BEFORE(dp, disk, d_next); 2435 dp = NULL; 2436 break; 2437 } 2438 if (LIST_NEXT(dp, d_next) == NULL) 2439 break; 2440 } 2441 if (dp != NULL) 2442 LIST_INSERT_AFTER(dp, disk, d_next); 2443 } 2444 G_MIRROR_DEBUG(1, "Device %s: provider %s detected.", 2445 sc->sc_name, g_mirror_get_diskname(disk)); 2446 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 2447 break; 2448 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2449 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2450 g_mirror_device_state2str(sc->sc_state), 2451 g_mirror_get_diskname(disk), 2452 g_mirror_disk_state2str(disk->d_state))); 2453 state = g_mirror_determine_state(disk); 2454 if (state != G_MIRROR_DISK_STATE_NONE) 2455 goto again; 2456 break; 2457 case G_MIRROR_DISK_STATE_ACTIVE: 2458 /* 2459 * Possible scenarios: 2460 * 1. New disk does not need synchronization. 2461 * 2. Synchronization process finished successfully. 2462 */ 2463 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2464 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2465 g_mirror_device_state2str(sc->sc_state), 2466 g_mirror_get_diskname(disk), 2467 g_mirror_disk_state2str(disk->d_state))); 2468 /* Previous state should be NEW or SYNCHRONIZING. */ 2469 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW || 2470 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2471 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2472 g_mirror_disk_state2str(disk->d_state))); 2473 DISK_STATE_CHANGED(); 2474 2475 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 2476 disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2477 disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC; 2478 g_mirror_sync_stop(disk, 0); 2479 } 2480 disk->d_state = state; 2481 disk->d_sync.ds_offset = 0; 2482 disk->d_sync.ds_offset_done = 0; 2483 g_mirror_update_idle(sc, disk); 2484 g_mirror_update_metadata(disk); 2485 G_MIRROR_DEBUG(1, "Device %s: provider %s activated.", 2486 sc->sc_name, g_mirror_get_diskname(disk)); 2487 break; 2488 case G_MIRROR_DISK_STATE_STALE: 2489 /* 2490 * Possible scenarios: 2491 * 1. Stale disk was connected. 2492 */ 2493 /* Previous state should be NEW. */ 2494 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2495 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2496 g_mirror_disk_state2str(disk->d_state))); 2497 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2498 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2499 g_mirror_device_state2str(sc->sc_state), 2500 g_mirror_get_diskname(disk), 2501 g_mirror_disk_state2str(disk->d_state))); 2502 /* 2503 * STALE state is only possible if device is marked 2504 * NOAUTOSYNC. 
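		 * (otherwise g_mirror_determine_state() would have sent 		 * the disk straight to SYNCHRONIZING).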
2505		 */ 2506		KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0, 2507		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2508		    g_mirror_device_state2str(sc->sc_state), 2509		    g_mirror_get_diskname(disk), 2510		    g_mirror_disk_state2str(disk->d_state))); 2511		DISK_STATE_CHANGED(); 2512 2513		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2514		disk->d_state = state; 2515		g_mirror_update_metadata(disk); 2516		G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.", 2517		    sc->sc_name, g_mirror_get_diskname(disk)); 2518		break; 2519	case G_MIRROR_DISK_STATE_SYNCHRONIZING: 2520		/* 2521		 * Possible scenarios: 2522		 * 1. Disk which needs synchronization was connected. 2523		 */ 2524		/* Previous state should be NEW. */ 2525		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2526		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2527		    g_mirror_disk_state2str(disk->d_state))); 2528		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2529		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2530		    g_mirror_device_state2str(sc->sc_state), 2531		    g_mirror_get_diskname(disk), 2532		    g_mirror_disk_state2str(disk->d_state))); 2533		DISK_STATE_CHANGED(); 2534 2535		if (disk->d_state == G_MIRROR_DISK_STATE_NEW) 2536			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2537		disk->d_state = state; 2538		if (sc->sc_provider != NULL) { 2539			g_mirror_sync_start(disk); 2540			g_mirror_update_metadata(disk); 2541		} 2542		break; 2543	case G_MIRROR_DISK_STATE_DISCONNECTED: 2544		/* 2545		 * Possible scenarios: 2546		 * 1. Device wasn't running yet, but a disk disappeared. 2547		 * 2. Disk was active and disappeared. 2548		 * 3. Disk disappeared during the synchronization process. 2549		 */ 2550		if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) { 2551			/* 2552			 * Previous state should be ACTIVE, STALE or 2553			 * SYNCHRONIZING. 2554			 */ 2555			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 2556			    disk->d_state == G_MIRROR_DISK_STATE_STALE || 2557			    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2558			    ("Wrong disk state (%s, %s).", 2559			    g_mirror_get_diskname(disk), 2560			    g_mirror_disk_state2str(disk->d_state))); 2561		} else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) { 2562			/* Previous state should be NEW. */ 2563			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2564			    ("Wrong disk state (%s, %s).", 2565			    g_mirror_get_diskname(disk), 2566			    g_mirror_disk_state2str(disk->d_state))); 2567			/* 2568			 * Reset bumping syncid if disk disappeared in STARTING 2569			 * state.
2570 */ 2571 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) 2572 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID; 2573#ifdef INVARIANTS 2574 } else { 2575 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).", 2576 sc->sc_name, 2577 g_mirror_device_state2str(sc->sc_state), 2578 g_mirror_get_diskname(disk), 2579 g_mirror_disk_state2str(disk->d_state))); 2580#endif 2581 } 2582 DISK_STATE_CHANGED(); 2583 G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.", 2584 sc->sc_name, g_mirror_get_diskname(disk)); 2585 2586 g_mirror_destroy_disk(disk); 2587 break; 2588 case G_MIRROR_DISK_STATE_DESTROY: 2589 { 2590 int error; 2591 2592 error = g_mirror_clear_metadata(disk); 2593 if (error != 0) 2594 return (error); 2595 DISK_STATE_CHANGED(); 2596 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", 2597 sc->sc_name, g_mirror_get_diskname(disk)); 2598 2599 g_mirror_destroy_disk(disk); 2600 sc->sc_ndisks--; 2601 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2602 g_mirror_update_metadata(disk); 2603 } 2604 break; 2605 } 2606 default: 2607 KASSERT(1 == 0, ("Unknown state (%u).", state)); 2608 break; 2609 } 2610 return (0); 2611} 2612#undef DISK_STATE_CHANGED 2613 2614int 2615g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md) 2616{ 2617 struct g_provider *pp; 2618 u_char *buf; 2619 int error; 2620 2621 g_topology_assert(); 2622 2623 error = g_access(cp, 1, 0, 0); 2624 if (error != 0) 2625 return (error); 2626 pp = cp->provider; 2627 g_topology_unlock(); 2628 /* Metadata are stored on last sector. */ 2629 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 2630 &error); 2631 g_topology_lock(); 2632 g_access(cp, -1, 0, 0); 2633 if (buf == NULL) { 2634 G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).", 2635 cp->provider->name, error); 2636 return (error); 2637 } 2638 2639 /* Decode metadata. 
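The magic and version fields are checked before the decode status, so a foreign or too-new provider is reported as such rather than as a checksum failure.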
*/ 2640 error = mirror_metadata_decode(buf, md); 2641 g_free(buf); 2642 if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0) 2643 return (EINVAL); 2644 if (md->md_version > G_MIRROR_VERSION) { 2645 G_MIRROR_DEBUG(0, 2646 "Kernel module is too old to handle metadata from %s.", 2647 cp->provider->name); 2648 return (EINVAL); 2649 } 2650 if (error != 0) { 2651 G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 2652 cp->provider->name); 2653 return (error); 2654 } 2655 2656 return (0); 2657} 2658 2659static int 2660g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp, 2661 struct g_mirror_metadata *md) 2662{ 2663 2664 if (g_mirror_id2disk(sc, md->md_did) != NULL) { 2665 G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.", 2666 pp->name, md->md_did); 2667 return (EEXIST); 2668 } 2669 if (md->md_all != sc->sc_ndisks) { 2670 G_MIRROR_DEBUG(1, 2671 "Invalid '%s' field on disk %s (device %s), skipping.", 2672 "md_all", pp->name, sc->sc_name); 2673 return (EINVAL); 2674 } 2675 if (md->md_slice != sc->sc_slice) { 2676 G_MIRROR_DEBUG(1, 2677 "Invalid '%s' field on disk %s (device %s), skipping.", 2678 "md_slice", pp->name, sc->sc_name); 2679 return (EINVAL); 2680 } 2681 if (md->md_balance != sc->sc_balance) { 2682 G_MIRROR_DEBUG(1, 2683 "Invalid '%s' field on disk %s (device %s), skipping.", 2684 "md_balance", pp->name, sc->sc_name); 2685 return (EINVAL); 2686 } 2687 if (md->md_mediasize != sc->sc_mediasize) { 2688 G_MIRROR_DEBUG(1, 2689 "Invalid '%s' field on disk %s (device %s), skipping.", 2690 "md_mediasize", pp->name, sc->sc_name); 2691 return (EINVAL); 2692 } 2693 if (sc->sc_mediasize > pp->mediasize) { 2694 G_MIRROR_DEBUG(1, 2695 "Invalid size of disk %s (device %s), skipping.", pp->name, 2696 sc->sc_name); 2697 return (EINVAL); 2698 } 2699 if (md->md_sectorsize != sc->sc_sectorsize) { 2700 G_MIRROR_DEBUG(1, 2701 "Invalid '%s' field on disk %s (device %s), skipping.", 2702 "md_sectorsize", pp->name, sc->sc_name); 2703 return (EINVAL); 2704 } 2705 if ((sc->sc_sectorsize % pp->sectorsize) != 0) { 2706 G_MIRROR_DEBUG(1, 2707 "Invalid sector size of disk %s (device %s), skipping.", 2708 pp->name, sc->sc_name); 2709 return (EINVAL); 2710 } 2711 if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) { 2712 G_MIRROR_DEBUG(1, 2713 "Invalid device flags on disk %s (device %s), skipping.", 2714 pp->name, sc->sc_name); 2715 return (EINVAL); 2716 } 2717 if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) { 2718 G_MIRROR_DEBUG(1, 2719 "Invalid disk flags on disk %s (device %s), skipping.", 2720 pp->name, sc->sc_name); 2721 return (EINVAL); 2722 } 2723 return (0); 2724} 2725 2726int 2727g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp, 2728 struct g_mirror_metadata *md) 2729{ 2730 struct g_mirror_disk *disk; 2731 int error; 2732 2733 g_topology_assert_not(); 2734 G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name); 2735 2736 error = g_mirror_check_metadata(sc, pp, md); 2737 if (error != 0) 2738 return (error); 2739 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING && 2740 md->md_genid < sc->sc_genid) { 2741 G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.", 2742 pp->name, sc->sc_name); 2743 return (EINVAL); 2744 } 2745 disk = g_mirror_init_disk(sc, pp, md, &error); 2746 if (disk == NULL) 2747 return (error); 2748 error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW, 2749 G_MIRROR_EVENT_WAIT); 2750 if (error != 0) 2751 return (error); 2752 if (md->md_version < G_MIRROR_VERSION) { 2753 G_MIRROR_DEBUG(0, "Upgrading metadata on %s 
(v%d->v%d).", 2754 pp->name, md->md_version, G_MIRROR_VERSION); 2755 g_mirror_update_metadata(disk); 2756 } 2757 return (0); 2758} 2759 2760static void 2761g_mirror_destroy_delayed(void *arg, int flag) 2762{ 2763 struct g_mirror_softc *sc; 2764 int error; 2765 2766 if (flag == EV_CANCEL) { 2767 G_MIRROR_DEBUG(1, "Destroying canceled."); 2768 return; 2769 } 2770 sc = arg; 2771 g_topology_unlock(); 2772 sx_xlock(&sc->sc_lock); 2773 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0, 2774 ("DESTROY flag set on %s.", sc->sc_name)); 2775 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0, 2776 ("DESTROYING flag not set on %s.", sc->sc_name)); 2777 G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name); 2778 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT); 2779 if (error != 0) { 2780 G_MIRROR_DEBUG(0, "Cannot destroy %s.", sc->sc_name); 2781 sx_xunlock(&sc->sc_lock); 2782 } 2783 g_topology_lock(); 2784} 2785 2786static int 2787g_mirror_access(struct g_provider *pp, int acr, int acw, int ace) 2788{ 2789 struct g_mirror_softc *sc; 2790 int dcr, dcw, dce, error = 0; 2791 2792 g_topology_assert(); 2793 G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 2794 acw, ace); 2795 2796 sc = pp->geom->softc; 2797 if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0) 2798 return (0); 2799 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 2800 2801 dcr = pp->acr + acr; 2802 dcw = pp->acw + acw; 2803 dce = pp->ace + ace; 2804 2805 g_topology_unlock(); 2806 sx_xlock(&sc->sc_lock); 2807 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 || 2808 LIST_EMPTY(&sc->sc_disks)) { 2809 if (acr > 0 || acw > 0 || ace > 0) 2810 error = ENXIO; 2811 goto end; 2812 } 2813 if (dcw == 0 && !sc->sc_idle) 2814 g_mirror_idle(sc, dcw); 2815 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0) { 2816 if (acr > 0 || acw > 0 || ace > 0) { 2817 error = ENXIO; 2818 goto end; 2819 } 2820 if (dcr == 0 && dcw == 0 && dce == 0) { 2821 g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK, 2822 sc, NULL); 2823 } 2824 } 2825end: 2826 sx_xunlock(&sc->sc_lock); 2827 g_topology_lock(); 2828 return (error); 2829} 2830 2831static struct g_geom * 2832g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md) 2833{ 2834 struct g_mirror_softc *sc; 2835 struct g_geom *gp; 2836 int error, timeout; 2837 2838 g_topology_assert(); 2839 G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name, 2840 md->md_mid); 2841 2842 /* One disk is minimum. */ 2843 if (md->md_all < 1) 2844 return (NULL); 2845 /* 2846 * Action geom. 
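 * It handles all regular I/O for the mirror; the separate "%s.sync" geom * created below owns the synchronization consumers.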
2847 */ 2848 gp = g_new_geomf(mp, "%s", md->md_name); 2849 sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO); 2850 gp->start = g_mirror_start; 2851 gp->orphan = g_mirror_orphan; 2852 gp->access = g_mirror_access; 2853 gp->dumpconf = g_mirror_dumpconf; 2854 2855 sc->sc_id = md->md_mid; 2856 sc->sc_slice = md->md_slice; 2857 sc->sc_balance = md->md_balance; 2858 sc->sc_mediasize = md->md_mediasize; 2859 sc->sc_sectorsize = md->md_sectorsize; 2860 sc->sc_ndisks = md->md_all; 2861 sc->sc_flags = md->md_mflags; 2862 sc->sc_bump_id = 0; 2863 sc->sc_idle = 1; 2864 sc->sc_last_write = time_uptime; 2865 sc->sc_writes = 0; 2866 sx_init(&sc->sc_lock, "gmirror:lock"); 2867 bioq_init(&sc->sc_queue); 2868 mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF); 2869 bioq_init(&sc->sc_regular_delayed); 2870 bioq_init(&sc->sc_inflight); 2871 bioq_init(&sc->sc_sync_delayed); 2872 LIST_INIT(&sc->sc_disks); 2873 TAILQ_INIT(&sc->sc_events); 2874 mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF); 2875 callout_init(&sc->sc_callout, CALLOUT_MPSAFE); 2876 sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING; 2877 gp->softc = sc; 2878 sc->sc_geom = gp; 2879 sc->sc_provider = NULL; 2880 /* 2881 * Synchronization geom. 2882 */ 2883 gp = g_new_geomf(mp, "%s.sync", md->md_name); 2884 gp->softc = sc; 2885 gp->orphan = g_mirror_orphan; 2886 sc->sc_sync.ds_geom = gp; 2887 sc->sc_sync.ds_ndisks = 0; 2888 error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0, 2889 "g_mirror %s", md->md_name); 2890 if (error != 0) { 2891 G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.", 2892 sc->sc_name); 2893 g_destroy_geom(sc->sc_sync.ds_geom); 2894 mtx_destroy(&sc->sc_events_mtx); 2895 mtx_destroy(&sc->sc_queue_mtx); 2896 sx_destroy(&sc->sc_lock); 2897 g_destroy_geom(sc->sc_geom); 2898 free(sc, M_MIRROR); 2899 return (NULL); 2900 } 2901 2902 G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).", 2903 sc->sc_name, sc->sc_ndisks, sc->sc_id); 2904 2905 sc->sc_rootmount = root_mount_hold("GMIRROR"); 2906 G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 2907 /* 2908 * Run timeout. 
2909 */ 2910 timeout = g_mirror_timeout * hz; 2911 callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc); 2912 return (sc->sc_geom); 2913} 2914 2915int 2916g_mirror_destroy(struct g_mirror_softc *sc, int how) 2917{ 2918 struct g_mirror_disk *disk; 2919 struct g_provider *pp; 2920 2921 g_topology_assert_not(); 2922 if (sc == NULL) 2923 return (ENXIO); 2924 sx_assert(&sc->sc_lock, SX_XLOCKED); 2925 2926 pp = sc->sc_provider; 2927 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { 2928 switch (how) { 2929 case G_MIRROR_DESTROY_SOFT: 2930 G_MIRROR_DEBUG(1, 2931 "Device %s is still open (r%dw%de%d).", pp->name, 2932 pp->acr, pp->acw, pp->ace); 2933 return (EBUSY); 2934 case G_MIRROR_DESTROY_DELAYED: 2935 G_MIRROR_DEBUG(1, 2936 "Device %s will be destroyed on last close.", 2937 pp->name); 2938 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2939 if (disk->d_state == 2940 G_MIRROR_DISK_STATE_SYNCHRONIZING) { 2941 g_mirror_sync_stop(disk, 1); 2942 } 2943 } 2944 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROYING; 2945 return (EBUSY); 2946 case G_MIRROR_DESTROY_HARD: 2947 G_MIRROR_DEBUG(1, "Device %s is still open, so it " 2948 "can't be definitely removed.", pp->name); 2949 } 2950 } 2951 2952 g_topology_lock(); 2953 if (sc->sc_geom->softc == NULL) { 2954 g_topology_unlock(); 2955 return (0); 2956 } 2957 sc->sc_geom->softc = NULL; 2958 sc->sc_sync.ds_geom->softc = NULL; 2959 g_topology_unlock(); 2960 2961 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2962 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT; 2963 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 2964 sx_xunlock(&sc->sc_lock); 2965 mtx_lock(&sc->sc_queue_mtx); 2966 wakeup(sc); 2967 mtx_unlock(&sc->sc_queue_mtx); 2968 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker); 2969 while (sc->sc_worker != NULL) 2970 tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5); 2971 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker); 2972 sx_xlock(&sc->sc_lock); 2973 g_mirror_destroy_device(sc); 2974 free(sc, M_MIRROR); 2975 return (0); 2976} 2977 2978static void 2979g_mirror_taste_orphan(struct g_consumer *cp) 2980{ 2981 2982 KASSERT(1 == 0, ("%s called while tasting %s.", __func__, 2983 cp->provider->name)); 2984} 2985 2986static struct g_geom * 2987g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 2988{ 2989 struct g_mirror_metadata md; 2990 struct g_mirror_softc *sc; 2991 struct g_consumer *cp; 2992 struct g_geom *gp; 2993 int error; 2994 2995 g_topology_assert(); 2996 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 2997 G_MIRROR_DEBUG(2, "Tasting %s.", pp->name); 2998 2999 gp = g_new_geomf(mp, "mirror:taste"); 3000 /* 3001 * This orphan function should be never called. 3002 */ 3003 gp->orphan = g_mirror_taste_orphan; 3004 cp = g_new_consumer(gp); 3005 g_attach(cp, pp); 3006 error = g_mirror_read_metadata(cp, &md); 3007 g_detach(cp); 3008 g_destroy_consumer(cp); 3009 g_destroy_geom(gp); 3010 if (error != 0) 3011 return (NULL); 3012 gp = NULL; 3013 3014 if (md.md_provider[0] != '\0' && 3015 !g_compare_names(md.md_provider, pp->name)) 3016 return (NULL); 3017 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize) 3018 return (NULL); 3019 if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) { 3020 G_MIRROR_DEBUG(0, 3021 "Device %s: provider %s marked as inactive, skipping.", 3022 md.md_name, pp->name); 3023 return (NULL); 3024 } 3025 if (g_mirror_debug >= 2) 3026 mirror_metadata_dump(&md); 3027 3028 /* 3029 * Let's check if device already exists. 
3030 */ 3031 sc = NULL; 3032 LIST_FOREACH(gp, &mp->geom, geom) { 3033 sc = gp->softc; 3034 if (sc == NULL) 3035 continue; 3036 if (sc->sc_sync.ds_geom == gp) 3037 continue; 3038 if (strcmp(md.md_name, sc->sc_name) != 0) 3039 continue; 3040 if (md.md_mid != sc->sc_id) { 3041 G_MIRROR_DEBUG(0, "Device %s already configured.", 3042 sc->sc_name); 3043 return (NULL); 3044 } 3045 break; 3046 } 3047 if (gp == NULL) { 3048 gp = g_mirror_create(mp, &md); 3049 if (gp == NULL) { 3050 G_MIRROR_DEBUG(0, "Cannot create device %s.", 3051 md.md_name); 3052 return (NULL); 3053 } 3054 sc = gp->softc; 3055 } 3056 G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 3057 g_topology_unlock(); 3058 sx_xlock(&sc->sc_lock); 3059 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_TASTING; 3060 error = g_mirror_add_disk(sc, pp, &md); 3061 if (error != 0) { 3062 G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 3063 pp->name, gp->name, error); 3064 if (LIST_EMPTY(&sc->sc_disks)) { 3065 g_cancel_event(sc); 3066 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3067 g_topology_lock(); 3068 return (NULL); 3069 } 3070 gp = NULL; 3071 } 3072 sc->sc_flags &= ~G_MIRROR_DEVICE_FLAG_TASTING; 3073 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 3074 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3075 g_topology_lock(); 3076 return (NULL); 3077 } 3078 sx_xunlock(&sc->sc_lock); 3079 g_topology_lock(); 3080 return (gp); 3081} 3082 3083static int 3084g_mirror_destroy_geom(struct gctl_req *req __unused, 3085 struct g_class *mp __unused, struct g_geom *gp) 3086{ 3087 struct g_mirror_softc *sc; 3088 int error; 3089 3090 g_topology_unlock(); 3091 sc = gp->softc; 3092 sx_xlock(&sc->sc_lock); 3093 g_cancel_event(sc); 3094 error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT); 3095 if (error != 0) 3096 sx_xunlock(&sc->sc_lock); 3097 g_topology_lock(); 3098 return (error); 3099} 3100 3101static void 3102g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 3103 struct g_consumer *cp, struct g_provider *pp) 3104{ 3105 struct g_mirror_softc *sc; 3106 3107 g_topology_assert(); 3108 3109 sc = gp->softc; 3110 if (sc == NULL) 3111 return; 3112 /* Skip synchronization geom. */ 3113 if (gp == sc->sc_sync.ds_geom) 3114 return; 3115 if (pp != NULL) { 3116 /* Nothing here. 
*/ 3117 } else if (cp != NULL) { 3118 struct g_mirror_disk *disk; 3119 3120 disk = cp->private; 3121 if (disk == NULL) 3122 return; 3123 g_topology_unlock(); 3124 sx_xlock(&sc->sc_lock); 3125 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id); 3126 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 3127 sbuf_printf(sb, "%s<Synchronized>", indent); 3128 if (disk->d_sync.ds_offset == 0) 3129 sbuf_printf(sb, "0%%"); 3130 else { 3131 sbuf_printf(sb, "%u%%", 3132 (u_int)((disk->d_sync.ds_offset * 100) / 3133 sc->sc_provider->mediasize)); 3134 } 3135 sbuf_printf(sb, "</Synchronized>\n"); 3136 } 3137 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, 3138 disk->d_sync.ds_syncid); 3139 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, 3140 disk->d_genid); 3141 sbuf_printf(sb, "%s<Flags>", indent); 3142 if (disk->d_flags == 0) 3143 sbuf_printf(sb, "NONE"); 3144 else { 3145 int first = 1; 3146 3147#define ADD_FLAG(flag, name) do { \ 3148 if ((disk->d_flags & (flag)) != 0) { \ 3149 if (!first) \ 3150 sbuf_printf(sb, ", "); \ 3151 else \ 3152 first = 0; \ 3153 sbuf_printf(sb, name); \ 3154 } \ 3155} while (0) 3156 ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY"); 3157 ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED"); 3158 ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE"); 3159 ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING, 3160 "SYNCHRONIZING"); 3161 ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC"); 3162 ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN"); 3163#undef ADD_FLAG 3164 } 3165 sbuf_printf(sb, "</Flags>\n"); 3166 sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent, 3167 disk->d_priority); 3168 sbuf_printf(sb, "%s<State>%s</State>\n", indent, 3169 g_mirror_disk_state2str(disk->d_state)); 3170 sx_xunlock(&sc->sc_lock); 3171 g_topology_lock(); 3172 } else { 3173 g_topology_unlock(); 3174 sx_xlock(&sc->sc_lock); 3175 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 3176 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid); 3177 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid); 3178 sbuf_printf(sb, "%s<Flags>", indent); 3179 if (sc->sc_flags == 0) 3180 sbuf_printf(sb, "NONE"); 3181 else { 3182 int first = 1; 3183 3184#define ADD_FLAG(flag, name) do { \ 3185 if ((sc->sc_flags & (flag)) != 0) { \ 3186 if (!first) \ 3187 sbuf_printf(sb, ", "); \ 3188 else \ 3189 first = 0; \ 3190 sbuf_printf(sb, name); \ 3191 } \ 3192} while (0) 3193 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC"); 3194 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC"); 3195#undef ADD_FLAG 3196 } 3197 sbuf_printf(sb, "</Flags>\n"); 3198 sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent, 3199 (u_int)sc->sc_slice); 3200 sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent, 3201 balance_name(sc->sc_balance)); 3202 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 3203 sc->sc_ndisks); 3204 sbuf_printf(sb, "%s<State>", indent); 3205 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 3206 sbuf_printf(sb, "%s", "STARTING"); 3207 else if (sc->sc_ndisks == 3208 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE)) 3209 sbuf_printf(sb, "%s", "COMPLETE"); 3210 else 3211 sbuf_printf(sb, "%s", "DEGRADED"); 3212 sbuf_printf(sb, "</State>\n"); 3213 sx_xunlock(&sc->sc_lock); 3214 g_topology_lock(); 3215 } 3216} 3217 3218static void 3219g_mirror_shutdown_pre_sync(void *arg, int howto) 3220{ 3221 struct g_class *mp; 3222 struct g_geom *gp, *gp2; 3223 struct g_mirror_softc *sc; 3224 int error; 3225 3226 mp = arg; 3227 DROP_GIANT(); 3228 g_topology_lock(); 3229 LIST_FOREACH_SAFE(gp, 
&mp->geom, geom, gp2) { 3230 if ((sc = gp->softc) == NULL) 3231 continue; 3232 /* Skip synchronization geom. */ 3233 if (gp == sc->sc_sync.ds_geom) 3234 continue; 3235 g_topology_unlock(); 3236 sx_xlock(&sc->sc_lock); 3237 g_cancel_event(sc); 3238 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED); 3239 if (error != 0) 3240 sx_xunlock(&sc->sc_lock); 3241 g_topology_lock(); 3242 } 3243 g_topology_unlock(); 3244 PICKUP_GIANT(); 3245} 3246 3247static void 3248g_mirror_init(struct g_class *mp) 3249{ 3250 3251 g_mirror_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync, 3252 g_mirror_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST); 3253 if (g_mirror_pre_sync == NULL) 3254 G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event."); 3255} 3256 3257static void 3258g_mirror_fini(struct g_class *mp) 3259{ 3260 3261 if (g_mirror_pre_sync != NULL) 3262 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_mirror_pre_sync); 3263} 3264 3265DECLARE_GEOM_CLASS(g_mirror_class, g_mirror); 3266