g_mirror.c revision 328333
1/*- 2 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: stable/11/sys/geom/mirror/g_mirror.c 328333 2018-01-24 15:15:18Z markj $"); 29 30#include <sys/param.h> 31#include <sys/systm.h> 32#include <sys/bio.h> 33#include <sys/eventhandler.h> 34#include <sys/fail.h> 35#include <sys/kernel.h> 36#include <sys/kthread.h> 37#include <sys/limits.h> 38#include <sys/lock.h> 39#include <sys/malloc.h> 40#include <sys/mutex.h> 41#include <sys/proc.h> 42#include <sys/sbuf.h> 43#include <sys/sched.h> 44#include <sys/sx.h> 45#include <sys/sysctl.h> 46 47#include <geom/geom.h> 48#include <geom/mirror/g_mirror.h> 49 50FEATURE(geom_mirror, "GEOM mirroring support"); 51 52static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data"); 53 54SYSCTL_DECL(_kern_geom); 55static SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0, 56 "GEOM_MIRROR stuff"); 57int g_mirror_debug = 0; 58SYSCTL_INT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RWTUN, &g_mirror_debug, 0, 59 "Debug level"); 60static u_int g_mirror_timeout = 4; 61SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_mirror_timeout, 62 0, "Time to wait on all mirror components"); 63static u_int g_mirror_idletime = 5; 64SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RWTUN, 65 &g_mirror_idletime, 0, "Mark components as clean when idling"); 66static u_int g_mirror_disconnect_on_failure = 1; 67SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN, 68 &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure."); 69static u_int g_mirror_syncreqs = 2; 70SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN, 71 &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests."); 72static u_int g_mirror_sync_period = 5; 73SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_update_period, CTLFLAG_RWTUN, 74 &g_mirror_sync_period, 0, 75 "Metadata update period during synchronization, in seconds"); 76 77#define MSLEEP(ident, mtx, priority, wmesg, timeout) do { \ 78 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \ 79 msleep((ident), (mtx), (priority), (wmesg), (timeout)); \ 80 G_MIRROR_DEBUG(4, "%s: 
Woken up %p.", __func__, (ident)); \ 81} while (0) 82 83static eventhandler_tag g_mirror_post_sync = NULL; 84static int g_mirror_shutdown = 0; 85 86static g_ctl_destroy_geom_t g_mirror_destroy_geom; 87static g_taste_t g_mirror_taste; 88static g_init_t g_mirror_init; 89static g_fini_t g_mirror_fini; 90static g_provgone_t g_mirror_providergone; 91static g_resize_t g_mirror_resize; 92 93struct g_class g_mirror_class = { 94 .name = G_MIRROR_CLASS_NAME, 95 .version = G_VERSION, 96 .ctlreq = g_mirror_config, 97 .taste = g_mirror_taste, 98 .destroy_geom = g_mirror_destroy_geom, 99 .init = g_mirror_init, 100 .fini = g_mirror_fini, 101 .providergone = g_mirror_providergone, 102 .resize = g_mirror_resize 103}; 104 105 106static void g_mirror_destroy_provider(struct g_mirror_softc *sc); 107static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state); 108static void g_mirror_update_device(struct g_mirror_softc *sc, bool force); 109static void g_mirror_dumpconf(struct sbuf *sb, const char *indent, 110 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp); 111static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type); 112static void g_mirror_register_request(struct g_mirror_softc *sc, 113 struct bio *bp); 114static void g_mirror_sync_release(struct g_mirror_softc *sc); 115 116 117static const char * 118g_mirror_disk_state2str(int state) 119{ 120 121 switch (state) { 122 case G_MIRROR_DISK_STATE_NONE: 123 return ("NONE"); 124 case G_MIRROR_DISK_STATE_NEW: 125 return ("NEW"); 126 case G_MIRROR_DISK_STATE_ACTIVE: 127 return ("ACTIVE"); 128 case G_MIRROR_DISK_STATE_STALE: 129 return ("STALE"); 130 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 131 return ("SYNCHRONIZING"); 132 case G_MIRROR_DISK_STATE_DISCONNECTED: 133 return ("DISCONNECTED"); 134 case G_MIRROR_DISK_STATE_DESTROY: 135 return ("DESTROY"); 136 default: 137 return ("INVALID"); 138 } 139} 140 141static const char * 142g_mirror_device_state2str(int state) 143{ 144 145 switch (state) { 146 case G_MIRROR_DEVICE_STATE_STARTING: 147 return ("STARTING"); 148 case G_MIRROR_DEVICE_STATE_RUNNING: 149 return ("RUNNING"); 150 default: 151 return ("INVALID"); 152 } 153} 154 155static const char * 156g_mirror_get_diskname(struct g_mirror_disk *disk) 157{ 158 159 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL) 160 return ("[unknown]"); 161 return (disk->d_name); 162} 163 164/* 165 * --- Events handling functions --- 166 * Events in geom_mirror are used to maintain disks and device status 167 * from one thread to simplify locking. 
168 */ 169static void 170g_mirror_event_free(struct g_mirror_event *ep) 171{ 172 173 free(ep, M_MIRROR); 174} 175 176int 177g_mirror_event_send(void *arg, int state, int flags) 178{ 179 struct g_mirror_softc *sc; 180 struct g_mirror_disk *disk; 181 struct g_mirror_event *ep; 182 int error; 183 184 ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK); 185 G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep); 186 if ((flags & G_MIRROR_EVENT_DEVICE) != 0) { 187 disk = NULL; 188 sc = arg; 189 } else { 190 disk = arg; 191 sc = disk->d_softc; 192 } 193 ep->e_disk = disk; 194 ep->e_state = state; 195 ep->e_flags = flags; 196 ep->e_error = 0; 197 mtx_lock(&sc->sc_events_mtx); 198 TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next); 199 mtx_unlock(&sc->sc_events_mtx); 200 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 201 mtx_lock(&sc->sc_queue_mtx); 202 wakeup(sc); 203 mtx_unlock(&sc->sc_queue_mtx); 204 if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0) 205 return (0); 206 sx_assert(&sc->sc_lock, SX_XLOCKED); 207 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep); 208 sx_xunlock(&sc->sc_lock); 209 while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) { 210 mtx_lock(&sc->sc_events_mtx); 211 MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event", 212 hz * 5); 213 } 214 error = ep->e_error; 215 g_mirror_event_free(ep); 216 sx_xlock(&sc->sc_lock); 217 return (error); 218} 219 220static struct g_mirror_event * 221g_mirror_event_first(struct g_mirror_softc *sc) 222{ 223 struct g_mirror_event *ep; 224 225 mtx_lock(&sc->sc_events_mtx); 226 ep = TAILQ_FIRST(&sc->sc_events); 227 mtx_unlock(&sc->sc_events_mtx); 228 return (ep); 229} 230 231static void 232g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep) 233{ 234 235 mtx_lock(&sc->sc_events_mtx); 236 TAILQ_REMOVE(&sc->sc_events, ep, e_next); 237 mtx_unlock(&sc->sc_events_mtx); 238} 239 240static void 241g_mirror_event_cancel(struct g_mirror_disk *disk) 242{ 243 struct g_mirror_softc *sc; 244 struct g_mirror_event *ep, *tmpep; 245 246 sc = disk->d_softc; 247 sx_assert(&sc->sc_lock, SX_XLOCKED); 248 249 mtx_lock(&sc->sc_events_mtx); 250 TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) { 251 if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) 252 continue; 253 if (ep->e_disk != disk) 254 continue; 255 TAILQ_REMOVE(&sc->sc_events, ep, e_next); 256 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) 257 g_mirror_event_free(ep); 258 else { 259 ep->e_error = ECANCELED; 260 wakeup(ep); 261 } 262 } 263 mtx_unlock(&sc->sc_events_mtx); 264} 265 266/* 267 * Return the number of disks in given state. 268 * If state is equal to -1, count all connected disks. 269 */ 270u_int 271g_mirror_ndisks(struct g_mirror_softc *sc, int state) 272{ 273 struct g_mirror_disk *disk; 274 u_int n = 0; 275 276 sx_assert(&sc->sc_lock, SX_LOCKED); 277 278 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 279 if (state == -1 || disk->d_state == state) 280 n++; 281 } 282 return (n); 283} 284 285/* 286 * Find a disk in mirror by its disk ID. 
287 */ 288static struct g_mirror_disk * 289g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id) 290{ 291 struct g_mirror_disk *disk; 292 293 sx_assert(&sc->sc_lock, SX_XLOCKED); 294 295 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 296 if (disk->d_id == id) 297 return (disk); 298 } 299 return (NULL); 300} 301 302static u_int 303g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp) 304{ 305 struct bio *bp; 306 u_int nreqs = 0; 307 308 mtx_lock(&sc->sc_queue_mtx); 309 TAILQ_FOREACH(bp, &sc->sc_queue, bio_queue) { 310 if (bp->bio_from == cp) 311 nreqs++; 312 } 313 mtx_unlock(&sc->sc_queue_mtx); 314 return (nreqs); 315} 316 317static int 318g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp) 319{ 320 321 if (cp->index > 0) { 322 G_MIRROR_DEBUG(2, 323 "I/O requests for %s exist, can't destroy it now.", 324 cp->provider->name); 325 return (1); 326 } 327 if (g_mirror_nrequests(sc, cp) > 0) { 328 G_MIRROR_DEBUG(2, 329 "I/O requests for %s in queue, can't destroy it now.", 330 cp->provider->name); 331 return (1); 332 } 333 return (0); 334} 335 336static void 337g_mirror_destroy_consumer(void *arg, int flags __unused) 338{ 339 struct g_consumer *cp; 340 341 g_topology_assert(); 342 343 cp = arg; 344 G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name); 345 g_detach(cp); 346 g_destroy_consumer(cp); 347} 348 349static void 350g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp) 351{ 352 struct g_provider *pp; 353 int retaste_wait; 354 355 g_topology_assert(); 356 357 cp->private = NULL; 358 if (g_mirror_is_busy(sc, cp)) 359 return; 360 pp = cp->provider; 361 retaste_wait = 0; 362 if (cp->acw == 1) { 363 if ((pp->geom->flags & G_GEOM_WITHER) == 0) 364 retaste_wait = 1; 365 } 366 G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr, 367 -cp->acw, -cp->ace, 0); 368 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) 369 g_access(cp, -cp->acr, -cp->acw, -cp->ace); 370 if (retaste_wait) { 371 /* 372 * After retaste event was send (inside g_access()), we can send 373 * event to detach and destroy consumer. 374 * A class, which has consumer to the given provider connected 375 * will not receive retaste event for the provider. 376 * This is the way how I ignore retaste events when I close 377 * consumers opened for write: I detach and destroy consumer 378 * after retaste event is sent. 
379 */ 380 g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL); 381 return; 382 } 383 G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name); 384 g_detach(cp); 385 g_destroy_consumer(cp); 386} 387 388static int 389g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp) 390{ 391 struct g_consumer *cp; 392 int error; 393 394 g_topology_assert_not(); 395 KASSERT(disk->d_consumer == NULL, 396 ("Disk already connected (device %s).", disk->d_softc->sc_name)); 397 398 g_topology_lock(); 399 cp = g_new_consumer(disk->d_softc->sc_geom); 400 cp->flags |= G_CF_DIRECT_RECEIVE; 401 error = g_attach(cp, pp); 402 if (error != 0) { 403 g_destroy_consumer(cp); 404 g_topology_unlock(); 405 return (error); 406 } 407 error = g_access(cp, 1, 1, 1); 408 if (error != 0) { 409 g_detach(cp); 410 g_destroy_consumer(cp); 411 g_topology_unlock(); 412 G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).", 413 pp->name, error); 414 return (error); 415 } 416 g_topology_unlock(); 417 disk->d_consumer = cp; 418 disk->d_consumer->private = disk; 419 disk->d_consumer->index = 0; 420 421 G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk)); 422 return (0); 423} 424 425static void 426g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp) 427{ 428 429 g_topology_assert(); 430 431 if (cp == NULL) 432 return; 433 if (cp->provider != NULL) 434 g_mirror_kill_consumer(sc, cp); 435 else 436 g_destroy_consumer(cp); 437} 438 439/* 440 * Initialize disk. This means allocate memory, create consumer, attach it 441 * to the provider and open access (r1w1e1) to it. 442 */ 443static struct g_mirror_disk * 444g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp, 445 struct g_mirror_metadata *md, int *errorp) 446{ 447 struct g_mirror_disk *disk; 448 int i, error; 449 450 disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO); 451 if (disk == NULL) { 452 error = ENOMEM; 453 goto fail; 454 } 455 disk->d_softc = sc; 456 error = g_mirror_connect_disk(disk, pp); 457 if (error != 0) 458 goto fail; 459 disk->d_id = md->md_did; 460 disk->d_state = G_MIRROR_DISK_STATE_NONE; 461 disk->d_priority = md->md_priority; 462 disk->d_flags = md->md_dflags; 463 error = g_getattr("GEOM::candelete", disk->d_consumer, &i); 464 if (error == 0 && i != 0) 465 disk->d_flags |= G_MIRROR_DISK_FLAG_CANDELETE; 466 if (md->md_provider[0] != '\0') 467 disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED; 468 disk->d_sync.ds_consumer = NULL; 469 disk->d_sync.ds_offset = md->md_sync_offset; 470 disk->d_sync.ds_offset_done = md->md_sync_offset; 471 disk->d_sync.ds_update_ts = time_uptime; 472 disk->d_genid = md->md_genid; 473 disk->d_sync.ds_syncid = md->md_syncid; 474 if (errorp != NULL) 475 *errorp = 0; 476 return (disk); 477fail: 478 if (errorp != NULL) 479 *errorp = error; 480 if (disk != NULL) 481 free(disk, M_MIRROR); 482 return (NULL); 483} 484 485static void 486g_mirror_destroy_disk(struct g_mirror_disk *disk) 487{ 488 struct g_mirror_softc *sc; 489 490 g_topology_assert_not(); 491 sc = disk->d_softc; 492 sx_assert(&sc->sc_lock, SX_XLOCKED); 493 494 LIST_REMOVE(disk, d_next); 495 g_mirror_event_cancel(disk); 496 if (sc->sc_hint == disk) 497 sc->sc_hint = NULL; 498 switch (disk->d_state) { 499 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 500 g_mirror_sync_stop(disk, 1); 501 /* FALLTHROUGH */ 502 case G_MIRROR_DISK_STATE_NEW: 503 case G_MIRROR_DISK_STATE_STALE: 504 case G_MIRROR_DISK_STATE_ACTIVE: 505 g_topology_lock(); 506 g_mirror_disconnect_consumer(sc, disk->d_consumer); 507 g_topology_unlock(); 
508 free(disk, M_MIRROR); 509 break; 510 default: 511 KASSERT(0 == 1, ("Wrong disk state (%s, %s).", 512 g_mirror_get_diskname(disk), 513 g_mirror_disk_state2str(disk->d_state))); 514 } 515} 516 517static void 518g_mirror_free_device(struct g_mirror_softc *sc) 519{ 520 521 mtx_destroy(&sc->sc_queue_mtx); 522 mtx_destroy(&sc->sc_events_mtx); 523 mtx_destroy(&sc->sc_done_mtx); 524 sx_destroy(&sc->sc_lock); 525 free(sc, M_MIRROR); 526} 527 528static void 529g_mirror_providergone(struct g_provider *pp) 530{ 531 struct g_mirror_softc *sc = pp->private; 532 533 if ((--sc->sc_refcnt) == 0) 534 g_mirror_free_device(sc); 535} 536 537static void 538g_mirror_destroy_device(struct g_mirror_softc *sc) 539{ 540 struct g_mirror_disk *disk; 541 struct g_mirror_event *ep; 542 struct g_geom *gp; 543 struct g_consumer *cp, *tmpcp; 544 545 g_topology_assert_not(); 546 sx_assert(&sc->sc_lock, SX_XLOCKED); 547 548 gp = sc->sc_geom; 549 if (sc->sc_provider != NULL) 550 g_mirror_destroy_provider(sc); 551 for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL; 552 disk = LIST_FIRST(&sc->sc_disks)) { 553 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 554 g_mirror_update_metadata(disk); 555 g_mirror_destroy_disk(disk); 556 } 557 while ((ep = g_mirror_event_first(sc)) != NULL) { 558 g_mirror_event_remove(sc, ep); 559 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) 560 g_mirror_event_free(ep); 561 else { 562 ep->e_error = ECANCELED; 563 ep->e_flags |= G_MIRROR_EVENT_DONE; 564 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep); 565 mtx_lock(&sc->sc_events_mtx); 566 wakeup(ep); 567 mtx_unlock(&sc->sc_events_mtx); 568 } 569 } 570 callout_drain(&sc->sc_callout); 571 572 g_topology_lock(); 573 LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) { 574 g_mirror_disconnect_consumer(sc, cp); 575 } 576 g_wither_geom(sc->sc_sync.ds_geom, ENXIO); 577 G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name); 578 g_wither_geom(gp, ENXIO); 579 sx_xunlock(&sc->sc_lock); 580 if ((--sc->sc_refcnt) == 0) 581 g_mirror_free_device(sc); 582 g_topology_unlock(); 583} 584 585static void 586g_mirror_orphan(struct g_consumer *cp) 587{ 588 struct g_mirror_disk *disk; 589 590 g_topology_assert(); 591 592 disk = cp->private; 593 if (disk == NULL) 594 return; 595 disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 596 g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED, 597 G_MIRROR_EVENT_DONTWAIT); 598} 599 600/* 601 * Function should return the next active disk on the list. 602 * It is possible that it will be the same disk as given. 603 * If there are no active disks on list, NULL is returned. 
604 */ 605static __inline struct g_mirror_disk * 606g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk) 607{ 608 struct g_mirror_disk *dp; 609 610 for (dp = LIST_NEXT(disk, d_next); dp != disk; 611 dp = LIST_NEXT(dp, d_next)) { 612 if (dp == NULL) 613 dp = LIST_FIRST(&sc->sc_disks); 614 if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE) 615 break; 616 } 617 if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE) 618 return (NULL); 619 return (dp); 620} 621 622static struct g_mirror_disk * 623g_mirror_get_disk(struct g_mirror_softc *sc) 624{ 625 struct g_mirror_disk *disk; 626 627 if (sc->sc_hint == NULL) { 628 sc->sc_hint = LIST_FIRST(&sc->sc_disks); 629 if (sc->sc_hint == NULL) 630 return (NULL); 631 } 632 disk = sc->sc_hint; 633 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) { 634 disk = g_mirror_find_next(sc, disk); 635 if (disk == NULL) 636 return (NULL); 637 } 638 sc->sc_hint = g_mirror_find_next(sc, disk); 639 return (disk); 640} 641 642static int 643g_mirror_write_metadata(struct g_mirror_disk *disk, 644 struct g_mirror_metadata *md) 645{ 646 struct g_mirror_softc *sc; 647 struct g_consumer *cp; 648 off_t offset, length; 649 u_char *sector; 650 int error = 0; 651 652 g_topology_assert_not(); 653 sc = disk->d_softc; 654 sx_assert(&sc->sc_lock, SX_LOCKED); 655 656 cp = disk->d_consumer; 657 KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name)); 658 KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name)); 659 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 660 ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr, 661 cp->acw, cp->ace)); 662 length = cp->provider->sectorsize; 663 offset = cp->provider->mediasize - length; 664 sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO); 665 if (md != NULL && 666 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0) { 667 /* 668 * Handle the case, when the size of parent provider reduced. 
669 */ 670 if (offset < md->md_mediasize) 671 error = ENOSPC; 672 else 673 mirror_metadata_encode(md, sector); 674 } 675 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_metadata_write, error); 676 if (error == 0) 677 error = g_write_data(cp, offset, sector, length); 678 free(sector, M_MIRROR); 679 if (error != 0) { 680 if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) { 681 disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN; 682 G_MIRROR_DEBUG(0, "Cannot write metadata on %s " 683 "(device=%s, error=%d).", 684 g_mirror_get_diskname(disk), sc->sc_name, error); 685 } else { 686 G_MIRROR_DEBUG(1, "Cannot write metadata on %s " 687 "(device=%s, error=%d).", 688 g_mirror_get_diskname(disk), sc->sc_name, error); 689 } 690 if (g_mirror_disconnect_on_failure && 691 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) { 692 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 693 g_mirror_event_send(disk, 694 G_MIRROR_DISK_STATE_DISCONNECTED, 695 G_MIRROR_EVENT_DONTWAIT); 696 } 697 } 698 return (error); 699} 700 701static int 702g_mirror_clear_metadata(struct g_mirror_disk *disk) 703{ 704 int error; 705 706 g_topology_assert_not(); 707 sx_assert(&disk->d_softc->sc_lock, SX_LOCKED); 708 709 if (disk->d_softc->sc_type != G_MIRROR_TYPE_AUTOMATIC) 710 return (0); 711 error = g_mirror_write_metadata(disk, NULL); 712 if (error == 0) { 713 G_MIRROR_DEBUG(2, "Metadata on %s cleared.", 714 g_mirror_get_diskname(disk)); 715 } else { 716 G_MIRROR_DEBUG(0, 717 "Cannot clear metadata on disk %s (error=%d).", 718 g_mirror_get_diskname(disk), error); 719 } 720 return (error); 721} 722 723void 724g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk, 725 struct g_mirror_metadata *md) 726{ 727 728 strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic)); 729 md->md_version = G_MIRROR_VERSION; 730 strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name)); 731 md->md_mid = sc->sc_id; 732 md->md_all = sc->sc_ndisks; 733 md->md_slice = sc->sc_slice; 734 md->md_balance = sc->sc_balance; 735 md->md_genid = sc->sc_genid; 736 md->md_mediasize = sc->sc_mediasize; 737 md->md_sectorsize = sc->sc_sectorsize; 738 md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK); 739 bzero(md->md_provider, sizeof(md->md_provider)); 740 if (disk == NULL) { 741 md->md_did = arc4random(); 742 md->md_priority = 0; 743 md->md_syncid = 0; 744 md->md_dflags = 0; 745 md->md_sync_offset = 0; 746 md->md_provsize = 0; 747 } else { 748 md->md_did = disk->d_id; 749 md->md_priority = disk->d_priority; 750 md->md_syncid = disk->d_sync.ds_syncid; 751 md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK); 752 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 753 md->md_sync_offset = disk->d_sync.ds_offset_done; 754 else 755 md->md_sync_offset = 0; 756 if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) { 757 strlcpy(md->md_provider, 758 disk->d_consumer->provider->name, 759 sizeof(md->md_provider)); 760 } 761 md->md_provsize = disk->d_consumer->provider->mediasize; 762 } 763} 764 765void 766g_mirror_update_metadata(struct g_mirror_disk *disk) 767{ 768 struct g_mirror_softc *sc; 769 struct g_mirror_metadata md; 770 int error; 771 772 g_topology_assert_not(); 773 sc = disk->d_softc; 774 sx_assert(&sc->sc_lock, SX_LOCKED); 775 776 if (sc->sc_type != G_MIRROR_TYPE_AUTOMATIC) 777 return; 778 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0) 779 g_mirror_fill_metadata(sc, disk, &md); 780 error = g_mirror_write_metadata(disk, &md); 781 if (error == 0) { 782 G_MIRROR_DEBUG(2, "Metadata on %s updated.", 783 g_mirror_get_diskname(disk)); 784 } else { 
785 G_MIRROR_DEBUG(0, 786 "Cannot update metadata on disk %s (error=%d).", 787 g_mirror_get_diskname(disk), error); 788 } 789} 790 791static void 792g_mirror_bump_syncid(struct g_mirror_softc *sc) 793{ 794 struct g_mirror_disk *disk; 795 796 g_topology_assert_not(); 797 sx_assert(&sc->sc_lock, SX_XLOCKED); 798 KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0, 799 ("%s called with no active disks (device=%s).", __func__, 800 sc->sc_name)); 801 802 sc->sc_syncid++; 803 G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name, 804 sc->sc_syncid); 805 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 806 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 807 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 808 disk->d_sync.ds_syncid = sc->sc_syncid; 809 g_mirror_update_metadata(disk); 810 } 811 } 812} 813 814static void 815g_mirror_bump_genid(struct g_mirror_softc *sc) 816{ 817 struct g_mirror_disk *disk; 818 819 g_topology_assert_not(); 820 sx_assert(&sc->sc_lock, SX_XLOCKED); 821 KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0, 822 ("%s called with no active disks (device=%s).", __func__, 823 sc->sc_name)); 824 825 sc->sc_genid++; 826 G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name, 827 sc->sc_genid); 828 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 829 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 830 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 831 disk->d_genid = sc->sc_genid; 832 g_mirror_update_metadata(disk); 833 } 834 } 835} 836 837static int 838g_mirror_idle(struct g_mirror_softc *sc, int acw) 839{ 840 struct g_mirror_disk *disk; 841 int timeout; 842 843 g_topology_assert_not(); 844 sx_assert(&sc->sc_lock, SX_XLOCKED); 845 846 if (sc->sc_provider == NULL) 847 return (0); 848 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0) 849 return (0); 850 if (sc->sc_idle) 851 return (0); 852 if (sc->sc_writes > 0) 853 return (0); 854 if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) { 855 timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write); 856 if (!g_mirror_shutdown && timeout > 0) 857 return (timeout); 858 } 859 sc->sc_idle = 1; 860 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 861 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 862 continue; 863 G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.", 864 g_mirror_get_diskname(disk), sc->sc_name); 865 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 866 g_mirror_update_metadata(disk); 867 } 868 return (0); 869} 870 871static void 872g_mirror_unidle(struct g_mirror_softc *sc) 873{ 874 struct g_mirror_disk *disk; 875 876 g_topology_assert_not(); 877 sx_assert(&sc->sc_lock, SX_XLOCKED); 878 879 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0) 880 return; 881 sc->sc_idle = 0; 882 sc->sc_last_write = time_uptime; 883 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 884 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 885 continue; 886 G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.", 887 g_mirror_get_diskname(disk), sc->sc_name); 888 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 889 g_mirror_update_metadata(disk); 890 } 891} 892 893static void 894g_mirror_done(struct bio *bp) 895{ 896 struct g_mirror_softc *sc; 897 898 sc = bp->bio_from->geom->softc; 899 bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR; 900 mtx_lock(&sc->sc_queue_mtx); 901 TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue); 902 mtx_unlock(&sc->sc_queue_mtx); 903 wakeup(sc); 904} 905 906static void 907g_mirror_regular_request_error(struct g_mirror_softc *sc, 908 struct g_mirror_disk *disk, struct bio 
*bp) 909{ 910 911 if (bp->bio_cmd == BIO_FLUSH && bp->bio_error == EOPNOTSUPP) 912 return; 913 914 if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) { 915 disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN; 916 G_MIRROR_LOGREQ(0, bp, "Request failed (error=%d).", 917 bp->bio_error); 918 } else { 919 G_MIRROR_LOGREQ(1, bp, "Request failed (error=%d).", 920 bp->bio_error); 921 } 922 if (g_mirror_disconnect_on_failure && 923 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) { 924 if (bp->bio_error == ENXIO && 925 bp->bio_cmd == BIO_READ) 926 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 927 else if (bp->bio_error == ENXIO) 928 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID_NOW; 929 else 930 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 931 g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED, 932 G_MIRROR_EVENT_DONTWAIT); 933 } 934} 935 936static void 937g_mirror_regular_request(struct g_mirror_softc *sc, struct bio *bp) 938{ 939 struct g_mirror_disk *disk; 940 struct bio *pbp; 941 942 g_topology_assert_not(); 943 KASSERT(sc->sc_provider == bp->bio_parent->bio_to, 944 ("regular request %p with unexpected origin", bp)); 945 946 pbp = bp->bio_parent; 947 bp->bio_from->index--; 948 if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) 949 sc->sc_writes--; 950 disk = bp->bio_from->private; 951 if (disk == NULL) { 952 g_topology_lock(); 953 g_mirror_kill_consumer(sc, bp->bio_from); 954 g_topology_unlock(); 955 } 956 957 switch (bp->bio_cmd) { 958 case BIO_READ: 959 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_read, 960 bp->bio_error); 961 break; 962 case BIO_WRITE: 963 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_write, 964 bp->bio_error); 965 break; 966 case BIO_DELETE: 967 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_delete, 968 bp->bio_error); 969 break; 970 case BIO_FLUSH: 971 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_flush, 972 bp->bio_error); 973 break; 974 } 975 976 pbp->bio_inbed++; 977 KASSERT(pbp->bio_inbed <= pbp->bio_children, 978 ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed, 979 pbp->bio_children)); 980 if (bp->bio_error == 0 && pbp->bio_error == 0) { 981 G_MIRROR_LOGREQ(3, bp, "Request delivered."); 982 g_destroy_bio(bp); 983 if (pbp->bio_children == pbp->bio_inbed) { 984 G_MIRROR_LOGREQ(3, pbp, "Request delivered."); 985 pbp->bio_completed = pbp->bio_length; 986 if (pbp->bio_cmd == BIO_WRITE || 987 pbp->bio_cmd == BIO_DELETE) { 988 TAILQ_REMOVE(&sc->sc_inflight, pbp, bio_queue); 989 /* Release delayed sync requests if possible. 
*/ 990 g_mirror_sync_release(sc); 991 } 992 g_io_deliver(pbp, pbp->bio_error); 993 } 994 return; 995 } else if (bp->bio_error != 0) { 996 if (pbp->bio_error == 0) 997 pbp->bio_error = bp->bio_error; 998 if (disk != NULL) 999 g_mirror_regular_request_error(sc, disk, bp); 1000 switch (pbp->bio_cmd) { 1001 case BIO_DELETE: 1002 case BIO_WRITE: 1003 case BIO_FLUSH: 1004 pbp->bio_inbed--; 1005 pbp->bio_children--; 1006 break; 1007 } 1008 } 1009 g_destroy_bio(bp); 1010 1011 switch (pbp->bio_cmd) { 1012 case BIO_READ: 1013 if (pbp->bio_inbed < pbp->bio_children) 1014 break; 1015 if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1) 1016 g_io_deliver(pbp, pbp->bio_error); 1017 else { 1018 pbp->bio_error = 0; 1019 mtx_lock(&sc->sc_queue_mtx); 1020 TAILQ_INSERT_TAIL(&sc->sc_queue, pbp, bio_queue); 1021 mtx_unlock(&sc->sc_queue_mtx); 1022 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 1023 wakeup(sc); 1024 } 1025 break; 1026 case BIO_DELETE: 1027 case BIO_WRITE: 1028 case BIO_FLUSH: 1029 if (pbp->bio_children == 0) { 1030 /* 1031 * All requests failed. 1032 */ 1033 } else if (pbp->bio_inbed < pbp->bio_children) { 1034 /* Do nothing. */ 1035 break; 1036 } else if (pbp->bio_children == pbp->bio_inbed) { 1037 /* Some requests succeeded. */ 1038 pbp->bio_error = 0; 1039 pbp->bio_completed = pbp->bio_length; 1040 } 1041 if (pbp->bio_cmd == BIO_WRITE || pbp->bio_cmd == BIO_DELETE) { 1042 TAILQ_REMOVE(&sc->sc_inflight, pbp, bio_queue); 1043 /* Release delayed sync requests if possible. */ 1044 g_mirror_sync_release(sc); 1045 } 1046 g_io_deliver(pbp, pbp->bio_error); 1047 break; 1048 default: 1049 KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd)); 1050 break; 1051 } 1052} 1053 1054static void 1055g_mirror_sync_done(struct bio *bp) 1056{ 1057 struct g_mirror_softc *sc; 1058 1059 G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered."); 1060 sc = bp->bio_from->geom->softc; 1061 bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC; 1062 mtx_lock(&sc->sc_queue_mtx); 1063 TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue); 1064 mtx_unlock(&sc->sc_queue_mtx); 1065 wakeup(sc); 1066} 1067 1068static void 1069g_mirror_candelete(struct bio *bp) 1070{ 1071 struct g_mirror_softc *sc; 1072 struct g_mirror_disk *disk; 1073 int *val; 1074 1075 sc = bp->bio_to->private; 1076 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1077 if (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE) 1078 break; 1079 } 1080 val = (int *)bp->bio_data; 1081 *val = (disk != NULL); 1082 g_io_deliver(bp, 0); 1083} 1084 1085static void 1086g_mirror_kernel_dump(struct bio *bp) 1087{ 1088 struct g_mirror_softc *sc; 1089 struct g_mirror_disk *disk; 1090 struct bio *cbp; 1091 struct g_kerneldump *gkd; 1092 1093 /* 1094 * We configure dumping to the first component, because this component 1095 * will be used for reading with 'prefer' balance algorithm. 1096 * If the component with the highest priority is currently disconnected 1097 * we will not be able to read the dump after the reboot if it will be 1098 * connected and synchronized later. Can we do something better? 
1099 */ 1100 sc = bp->bio_to->private; 1101 disk = LIST_FIRST(&sc->sc_disks); 1102 1103 gkd = (struct g_kerneldump *)bp->bio_data; 1104 if (gkd->length > bp->bio_to->mediasize) 1105 gkd->length = bp->bio_to->mediasize; 1106 cbp = g_clone_bio(bp); 1107 if (cbp == NULL) { 1108 g_io_deliver(bp, ENOMEM); 1109 return; 1110 } 1111 cbp->bio_done = g_std_done; 1112 g_io_request(cbp, disk->d_consumer); 1113 G_MIRROR_DEBUG(1, "Kernel dump will go to %s.", 1114 g_mirror_get_diskname(disk)); 1115} 1116 1117static void 1118g_mirror_start(struct bio *bp) 1119{ 1120 struct g_mirror_softc *sc; 1121 1122 sc = bp->bio_to->private; 1123 /* 1124 * If sc == NULL or there are no valid disks, provider's error 1125 * should be set and g_mirror_start() should not be called at all. 1126 */ 1127 KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 1128 ("Provider's error should be set (error=%d)(mirror=%s).", 1129 bp->bio_to->error, bp->bio_to->name)); 1130 G_MIRROR_LOGREQ(3, bp, "Request received."); 1131 1132 switch (bp->bio_cmd) { 1133 case BIO_READ: 1134 case BIO_WRITE: 1135 case BIO_DELETE: 1136 case BIO_FLUSH: 1137 break; 1138 case BIO_GETATTR: 1139 if (!strcmp(bp->bio_attribute, "GEOM::candelete")) { 1140 g_mirror_candelete(bp); 1141 return; 1142 } else if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) { 1143 g_mirror_kernel_dump(bp); 1144 return; 1145 } 1146 /* FALLTHROUGH */ 1147 default: 1148 g_io_deliver(bp, EOPNOTSUPP); 1149 return; 1150 } 1151 mtx_lock(&sc->sc_queue_mtx); 1152 if (bp->bio_to->error != 0) { 1153 mtx_unlock(&sc->sc_queue_mtx); 1154 g_io_deliver(bp, bp->bio_to->error); 1155 return; 1156 } 1157 TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue); 1158 mtx_unlock(&sc->sc_queue_mtx); 1159 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 1160 wakeup(sc); 1161} 1162 1163/* 1164 * Return TRUE if the given request is colliding with a in-progress 1165 * synchronization request. 1166 */ 1167static bool 1168g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp) 1169{ 1170 struct g_mirror_disk *disk; 1171 struct bio *sbp; 1172 off_t rstart, rend, sstart, send; 1173 u_int i; 1174 1175 if (sc->sc_sync.ds_ndisks == 0) 1176 return (false); 1177 rstart = bp->bio_offset; 1178 rend = bp->bio_offset + bp->bio_length; 1179 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1180 if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING) 1181 continue; 1182 for (i = 0; i < g_mirror_syncreqs; i++) { 1183 sbp = disk->d_sync.ds_bios[i]; 1184 if (sbp == NULL) 1185 continue; 1186 sstart = sbp->bio_offset; 1187 send = sbp->bio_offset + sbp->bio_length; 1188 if (rend > sstart && rstart < send) 1189 return (true); 1190 } 1191 } 1192 return (false); 1193} 1194 1195/* 1196 * Return TRUE if the given sync request is colliding with a in-progress regular 1197 * request. 1198 */ 1199static bool 1200g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp) 1201{ 1202 off_t rstart, rend, sstart, send; 1203 struct bio *bp; 1204 1205 if (sc->sc_sync.ds_ndisks == 0) 1206 return (false); 1207 sstart = sbp->bio_offset; 1208 send = sbp->bio_offset + sbp->bio_length; 1209 TAILQ_FOREACH(bp, &sc->sc_inflight, bio_queue) { 1210 rstart = bp->bio_offset; 1211 rend = bp->bio_offset + bp->bio_length; 1212 if (rend > sstart && rstart < send) 1213 return (true); 1214 } 1215 return (false); 1216} 1217 1218/* 1219 * Puts regular request onto delayed queue. 
1220 */ 1221static void 1222g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp) 1223{ 1224 1225 G_MIRROR_LOGREQ(2, bp, "Delaying request."); 1226 TAILQ_INSERT_TAIL(&sc->sc_regular_delayed, bp, bio_queue); 1227} 1228 1229/* 1230 * Puts synchronization request onto delayed queue. 1231 */ 1232static void 1233g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp) 1234{ 1235 1236 G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request."); 1237 TAILQ_INSERT_TAIL(&sc->sc_sync_delayed, bp, bio_queue); 1238} 1239 1240/* 1241 * Requeue delayed regular requests. 1242 */ 1243static void 1244g_mirror_regular_release(struct g_mirror_softc *sc) 1245{ 1246 struct bio *bp; 1247 1248 if ((bp = TAILQ_FIRST(&sc->sc_regular_delayed)) == NULL) 1249 return; 1250 if (g_mirror_sync_collision(sc, bp)) 1251 return; 1252 1253 G_MIRROR_DEBUG(2, "Requeuing regular requests after collision."); 1254 mtx_lock(&sc->sc_queue_mtx); 1255 TAILQ_CONCAT(&sc->sc_regular_delayed, &sc->sc_queue, bio_queue); 1256 TAILQ_SWAP(&sc->sc_regular_delayed, &sc->sc_queue, bio, bio_queue); 1257 mtx_unlock(&sc->sc_queue_mtx); 1258} 1259 1260/* 1261 * Releases delayed sync requests which don't collide anymore with regular 1262 * requests. 1263 */ 1264static void 1265g_mirror_sync_release(struct g_mirror_softc *sc) 1266{ 1267 struct bio *bp, *bp2; 1268 1269 TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed, bio_queue, bp2) { 1270 if (g_mirror_regular_collision(sc, bp)) 1271 continue; 1272 TAILQ_REMOVE(&sc->sc_sync_delayed, bp, bio_queue); 1273 G_MIRROR_LOGREQ(2, bp, 1274 "Releasing delayed synchronization request."); 1275 g_io_request(bp, bp->bio_from); 1276 } 1277} 1278 1279/* 1280 * Free a synchronization request and clear its slot in the array. 1281 */ 1282static void 1283g_mirror_sync_request_free(struct g_mirror_disk *disk, struct bio *bp) 1284{ 1285 int idx; 1286 1287 if (disk != NULL && disk->d_sync.ds_bios != NULL) { 1288 idx = (int)(uintptr_t)bp->bio_caller1; 1289 KASSERT(disk->d_sync.ds_bios[idx] == bp, 1290 ("unexpected sync BIO at %p:%d", disk, idx)); 1291 disk->d_sync.ds_bios[idx] = NULL; 1292 } 1293 free(bp->bio_data, M_MIRROR); 1294 g_destroy_bio(bp); 1295} 1296 1297/* 1298 * Handle synchronization requests. 1299 * Every synchronization request is two-steps process: first, READ request is 1300 * send to active provider and then WRITE request (with read data) to the provider 1301 * being synchronized. When WRITE is finished, new synchronization request is 1302 * send. 1303 */ 1304static void 1305g_mirror_sync_request(struct g_mirror_softc *sc, struct bio *bp) 1306{ 1307 struct g_mirror_disk *disk; 1308 struct g_mirror_disk_sync *sync; 1309 1310 KASSERT((bp->bio_cmd == BIO_READ && 1311 bp->bio_from->geom == sc->sc_sync.ds_geom) || 1312 (bp->bio_cmd == BIO_WRITE && bp->bio_from->geom == sc->sc_geom), 1313 ("Sync BIO %p with unexpected origin", bp)); 1314 1315 bp->bio_from->index--; 1316 disk = bp->bio_from->private; 1317 if (disk == NULL) { 1318 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */ 1319 g_topology_lock(); 1320 g_mirror_kill_consumer(sc, bp->bio_from); 1321 g_topology_unlock(); 1322 g_mirror_sync_request_free(NULL, bp); 1323 sx_xlock(&sc->sc_lock); 1324 return; 1325 } 1326 1327 /* 1328 * Synchronization request. 
1329 */ 1330 switch (bp->bio_cmd) { 1331 case BIO_READ: 1332 { 1333 struct g_consumer *cp; 1334 1335 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_read, 1336 bp->bio_error); 1337 1338 if (bp->bio_error != 0) { 1339 G_MIRROR_LOGREQ(0, bp, 1340 "Synchronization request failed (error=%d).", 1341 bp->bio_error); 1342 g_mirror_sync_request_free(disk, bp); 1343 return; 1344 } 1345 G_MIRROR_LOGREQ(3, bp, 1346 "Synchronization request half-finished."); 1347 bp->bio_cmd = BIO_WRITE; 1348 bp->bio_cflags = 0; 1349 cp = disk->d_consumer; 1350 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1351 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1352 cp->acr, cp->acw, cp->ace)); 1353 cp->index++; 1354 g_io_request(bp, cp); 1355 return; 1356 } 1357 case BIO_WRITE: 1358 { 1359 off_t offset; 1360 void *data; 1361 int i, idx; 1362 1363 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_write, 1364 bp->bio_error); 1365 1366 if (bp->bio_error != 0) { 1367 G_MIRROR_LOGREQ(0, bp, 1368 "Synchronization request failed (error=%d).", 1369 bp->bio_error); 1370 g_mirror_sync_request_free(disk, bp); 1371 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 1372 g_mirror_event_send(disk, 1373 G_MIRROR_DISK_STATE_DISCONNECTED, 1374 G_MIRROR_EVENT_DONTWAIT); 1375 return; 1376 } 1377 G_MIRROR_LOGREQ(3, bp, "Synchronization request finished."); 1378 sync = &disk->d_sync; 1379 if (sync->ds_offset >= sc->sc_mediasize || 1380 sync->ds_consumer == NULL || 1381 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1382 /* Don't send more synchronization requests. */ 1383 sync->ds_inflight--; 1384 g_mirror_sync_request_free(disk, bp); 1385 if (sync->ds_inflight > 0) 1386 return; 1387 if (sync->ds_consumer == NULL || 1388 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1389 return; 1390 } 1391 /* Disk up-to-date, activate it. */ 1392 g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE, 1393 G_MIRROR_EVENT_DONTWAIT); 1394 return; 1395 } 1396 1397 /* Send next synchronization request. */ 1398 data = bp->bio_data; 1399 idx = (int)(uintptr_t)bp->bio_caller1; 1400 g_reset_bio(bp); 1401 bp->bio_cmd = BIO_READ; 1402 bp->bio_offset = sync->ds_offset; 1403 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); 1404 sync->ds_offset += bp->bio_length; 1405 bp->bio_done = g_mirror_sync_done; 1406 bp->bio_data = data; 1407 bp->bio_from = sync->ds_consumer; 1408 bp->bio_to = sc->sc_provider; 1409 bp->bio_caller1 = (void *)(uintptr_t)idx; 1410 G_MIRROR_LOGREQ(3, bp, "Sending synchronization request."); 1411 sync->ds_consumer->index++; 1412 /* 1413 * Delay the request if it is colliding with a regular request. 1414 */ 1415 if (g_mirror_regular_collision(sc, bp)) 1416 g_mirror_sync_delay(sc, bp); 1417 else 1418 g_io_request(bp, sync->ds_consumer); 1419 1420 /* Requeue delayed requests if possible. 
*/ 1421 g_mirror_regular_release(sc); 1422 1423 /* Find the smallest offset */ 1424 offset = sc->sc_mediasize; 1425 for (i = 0; i < g_mirror_syncreqs; i++) { 1426 bp = sync->ds_bios[i]; 1427 if (bp != NULL && bp->bio_offset < offset) 1428 offset = bp->bio_offset; 1429 } 1430 if (g_mirror_sync_period > 0 && 1431 time_uptime - sync->ds_update_ts > g_mirror_sync_period) { 1432 sync->ds_offset_done = offset; 1433 g_mirror_update_metadata(disk); 1434 sync->ds_update_ts = time_uptime; 1435 } 1436 return; 1437 } 1438 default: 1439 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", 1440 bp->bio_cmd, sc->sc_name)); 1441 break; 1442 } 1443} 1444 1445static void 1446g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp) 1447{ 1448 struct g_mirror_disk *disk; 1449 struct g_consumer *cp; 1450 struct bio *cbp; 1451 1452 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1453 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE) 1454 break; 1455 } 1456 if (disk == NULL) { 1457 if (bp->bio_error == 0) 1458 bp->bio_error = ENXIO; 1459 g_io_deliver(bp, bp->bio_error); 1460 return; 1461 } 1462 cbp = g_clone_bio(bp); 1463 if (cbp == NULL) { 1464 if (bp->bio_error == 0) 1465 bp->bio_error = ENOMEM; 1466 g_io_deliver(bp, bp->bio_error); 1467 return; 1468 } 1469 /* 1470 * Fill in the component buf structure. 1471 */ 1472 cp = disk->d_consumer; 1473 cbp->bio_done = g_mirror_done; 1474 cbp->bio_to = cp->provider; 1475 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1476 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1477 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1478 cp->acw, cp->ace)); 1479 cp->index++; 1480 g_io_request(cbp, cp); 1481} 1482 1483static void 1484g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp) 1485{ 1486 struct g_mirror_disk *disk; 1487 struct g_consumer *cp; 1488 struct bio *cbp; 1489 1490 disk = g_mirror_get_disk(sc); 1491 if (disk == NULL) { 1492 if (bp->bio_error == 0) 1493 bp->bio_error = ENXIO; 1494 g_io_deliver(bp, bp->bio_error); 1495 return; 1496 } 1497 cbp = g_clone_bio(bp); 1498 if (cbp == NULL) { 1499 if (bp->bio_error == 0) 1500 bp->bio_error = ENOMEM; 1501 g_io_deliver(bp, bp->bio_error); 1502 return; 1503 } 1504 /* 1505 * Fill in the component buf structure. 1506 */ 1507 cp = disk->d_consumer; 1508 cbp->bio_done = g_mirror_done; 1509 cbp->bio_to = cp->provider; 1510 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1511 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1512 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1513 cp->acw, cp->ace)); 1514 cp->index++; 1515 g_io_request(cbp, cp); 1516} 1517 1518#define TRACK_SIZE (1 * 1024 * 1024) 1519#define LOAD_SCALE 256 1520#define ABS(x) (((x) >= 0) ? (x) : (-(x))) 1521 1522static void 1523g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp) 1524{ 1525 struct g_mirror_disk *disk, *dp; 1526 struct g_consumer *cp; 1527 struct bio *cbp; 1528 int prio, best; 1529 1530 /* Find a disk with the smallest load. */ 1531 disk = NULL; 1532 best = INT_MAX; 1533 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 1534 if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1535 continue; 1536 prio = dp->load; 1537 /* If disk head is precisely in position - highly prefer it. */ 1538 if (dp->d_last_offset == bp->bio_offset) 1539 prio -= 2 * LOAD_SCALE; 1540 else 1541 /* If disk head is close to position - prefer it. 
*/ 1542 if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE) 1543 prio -= 1 * LOAD_SCALE; 1544 if (prio <= best) { 1545 disk = dp; 1546 best = prio; 1547 } 1548 } 1549 KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name)); 1550 cbp = g_clone_bio(bp); 1551 if (cbp == NULL) { 1552 if (bp->bio_error == 0) 1553 bp->bio_error = ENOMEM; 1554 g_io_deliver(bp, bp->bio_error); 1555 return; 1556 } 1557 /* 1558 * Fill in the component buf structure. 1559 */ 1560 cp = disk->d_consumer; 1561 cbp->bio_done = g_mirror_done; 1562 cbp->bio_to = cp->provider; 1563 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1564 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1565 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1566 cp->acw, cp->ace)); 1567 cp->index++; 1568 /* Remember last head position */ 1569 disk->d_last_offset = bp->bio_offset + bp->bio_length; 1570 /* Update loads. */ 1571 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 1572 dp->load = (dp->d_consumer->index * LOAD_SCALE + 1573 dp->load * 7) / 8; 1574 } 1575 g_io_request(cbp, cp); 1576} 1577 1578static void 1579g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp) 1580{ 1581 struct bio_queue queue; 1582 struct g_mirror_disk *disk; 1583 struct g_consumer *cp; 1584 struct bio *cbp; 1585 off_t left, mod, offset, slice; 1586 u_char *data; 1587 u_int ndisks; 1588 1589 if (bp->bio_length <= sc->sc_slice) { 1590 g_mirror_request_round_robin(sc, bp); 1591 return; 1592 } 1593 ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE); 1594 slice = bp->bio_length / ndisks; 1595 mod = slice % sc->sc_provider->sectorsize; 1596 if (mod != 0) 1597 slice += sc->sc_provider->sectorsize - mod; 1598 /* 1599 * Allocate all bios before sending any request, so we can 1600 * return ENOMEM in nice and clean way. 
1601 */ 1602 left = bp->bio_length; 1603 offset = bp->bio_offset; 1604 data = bp->bio_data; 1605 TAILQ_INIT(&queue); 1606 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1607 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1608 continue; 1609 cbp = g_clone_bio(bp); 1610 if (cbp == NULL) { 1611 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1612 TAILQ_REMOVE(&queue, cbp, bio_queue); 1613 g_destroy_bio(cbp); 1614 } 1615 if (bp->bio_error == 0) 1616 bp->bio_error = ENOMEM; 1617 g_io_deliver(bp, bp->bio_error); 1618 return; 1619 } 1620 TAILQ_INSERT_TAIL(&queue, cbp, bio_queue); 1621 cbp->bio_done = g_mirror_done; 1622 cbp->bio_caller1 = disk; 1623 cbp->bio_to = disk->d_consumer->provider; 1624 cbp->bio_offset = offset; 1625 cbp->bio_data = data; 1626 cbp->bio_length = MIN(left, slice); 1627 left -= cbp->bio_length; 1628 if (left == 0) 1629 break; 1630 offset += cbp->bio_length; 1631 data += cbp->bio_length; 1632 } 1633 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1634 TAILQ_REMOVE(&queue, cbp, bio_queue); 1635 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1636 disk = cbp->bio_caller1; 1637 cbp->bio_caller1 = NULL; 1638 cp = disk->d_consumer; 1639 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1640 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1641 cp->acr, cp->acw, cp->ace)); 1642 disk->d_consumer->index++; 1643 g_io_request(cbp, disk->d_consumer); 1644 } 1645} 1646 1647static void 1648g_mirror_register_request(struct g_mirror_softc *sc, struct bio *bp) 1649{ 1650 struct bio_queue queue; 1651 struct bio *cbp; 1652 struct g_consumer *cp; 1653 struct g_mirror_disk *disk; 1654 1655 sx_assert(&sc->sc_lock, SA_XLOCKED); 1656 1657 /* 1658 * To avoid ordering issues, if a write is deferred because of a 1659 * collision with a sync request, all I/O is deferred until that 1660 * write is initiated. 1661 */ 1662 if (bp->bio_from->geom != sc->sc_sync.ds_geom && 1663 !TAILQ_EMPTY(&sc->sc_regular_delayed)) { 1664 g_mirror_regular_delay(sc, bp); 1665 return; 1666 } 1667 1668 switch (bp->bio_cmd) { 1669 case BIO_READ: 1670 switch (sc->sc_balance) { 1671 case G_MIRROR_BALANCE_LOAD: 1672 g_mirror_request_load(sc, bp); 1673 break; 1674 case G_MIRROR_BALANCE_PREFER: 1675 g_mirror_request_prefer(sc, bp); 1676 break; 1677 case G_MIRROR_BALANCE_ROUND_ROBIN: 1678 g_mirror_request_round_robin(sc, bp); 1679 break; 1680 case G_MIRROR_BALANCE_SPLIT: 1681 g_mirror_request_split(sc, bp); 1682 break; 1683 } 1684 return; 1685 case BIO_WRITE: 1686 case BIO_DELETE: 1687 /* 1688 * Delay the request if it is colliding with a synchronization 1689 * request. 1690 */ 1691 if (g_mirror_sync_collision(sc, bp)) { 1692 g_mirror_regular_delay(sc, bp); 1693 return; 1694 } 1695 1696 if (sc->sc_idle) 1697 g_mirror_unidle(sc); 1698 else 1699 sc->sc_last_write = time_uptime; 1700 1701 /* 1702 * Bump syncid on first write. 1703 */ 1704 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) { 1705 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID; 1706 g_mirror_bump_syncid(sc); 1707 } 1708 1709 /* 1710 * Allocate all bios before sending any request, so we can 1711 * return ENOMEM in nice and clean way. 
1712 */ 1713 TAILQ_INIT(&queue); 1714 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1715 switch (disk->d_state) { 1716 case G_MIRROR_DISK_STATE_ACTIVE: 1717 break; 1718 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 1719 if (bp->bio_offset >= disk->d_sync.ds_offset) 1720 continue; 1721 break; 1722 default: 1723 continue; 1724 } 1725 if (bp->bio_cmd == BIO_DELETE && 1726 (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE) == 0) 1727 continue; 1728 cbp = g_clone_bio(bp); 1729 if (cbp == NULL) { 1730 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1731 TAILQ_REMOVE(&queue, cbp, bio_queue); 1732 g_destroy_bio(cbp); 1733 } 1734 if (bp->bio_error == 0) 1735 bp->bio_error = ENOMEM; 1736 g_io_deliver(bp, bp->bio_error); 1737 return; 1738 } 1739 TAILQ_INSERT_TAIL(&queue, cbp, bio_queue); 1740 cbp->bio_done = g_mirror_done; 1741 cp = disk->d_consumer; 1742 cbp->bio_caller1 = cp; 1743 cbp->bio_to = cp->provider; 1744 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1745 ("Consumer %s not opened (r%dw%de%d).", 1746 cp->provider->name, cp->acr, cp->acw, cp->ace)); 1747 } 1748 if (TAILQ_EMPTY(&queue)) { 1749 KASSERT(bp->bio_cmd == BIO_DELETE, 1750 ("No consumers for regular request %p", bp)); 1751 g_io_deliver(bp, EOPNOTSUPP); 1752 return; 1753 } 1754 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1755 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1756 TAILQ_REMOVE(&queue, cbp, bio_queue); 1757 cp = cbp->bio_caller1; 1758 cbp->bio_caller1 = NULL; 1759 cp->index++; 1760 sc->sc_writes++; 1761 g_io_request(cbp, cp); 1762 } 1763 /* 1764 * Put request onto inflight queue, so we can check if new 1765 * synchronization requests don't collide with it. 1766 */ 1767 TAILQ_INSERT_TAIL(&sc->sc_inflight, bp, bio_queue); 1768 return; 1769 case BIO_FLUSH: 1770 TAILQ_INIT(&queue); 1771 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1772 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1773 continue; 1774 cbp = g_clone_bio(bp); 1775 if (cbp == NULL) { 1776 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1777 TAILQ_REMOVE(&queue, cbp, bio_queue); 1778 g_destroy_bio(cbp); 1779 } 1780 if (bp->bio_error == 0) 1781 bp->bio_error = ENOMEM; 1782 g_io_deliver(bp, bp->bio_error); 1783 return; 1784 } 1785 TAILQ_INSERT_TAIL(&queue, cbp, bio_queue); 1786 cbp->bio_done = g_mirror_done; 1787 cbp->bio_caller1 = disk; 1788 cbp->bio_to = disk->d_consumer->provider; 1789 } 1790 KASSERT(!TAILQ_EMPTY(&queue), 1791 ("No consumers for regular request %p", bp)); 1792 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1793 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1794 TAILQ_REMOVE(&queue, cbp, bio_queue); 1795 disk = cbp->bio_caller1; 1796 cbp->bio_caller1 = NULL; 1797 cp = disk->d_consumer; 1798 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1799 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1800 cp->acr, cp->acw, cp->ace)); 1801 cp->index++; 1802 g_io_request(cbp, cp); 1803 } 1804 break; 1805 default: 1806 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", 1807 bp->bio_cmd, sc->sc_name)); 1808 break; 1809 } 1810} 1811 1812static int 1813g_mirror_can_destroy(struct g_mirror_softc *sc) 1814{ 1815 struct g_geom *gp; 1816 struct g_consumer *cp; 1817 1818 g_topology_assert(); 1819 gp = sc->sc_geom; 1820 if (gp->softc == NULL) 1821 return (1); 1822 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_TASTING) != 0) 1823 return (0); 1824 LIST_FOREACH(cp, &gp->consumer, consumer) { 1825 if (g_mirror_is_busy(sc, cp)) 1826 return (0); 1827 } 1828 gp = sc->sc_sync.ds_geom; 1829 LIST_FOREACH(cp, &gp->consumer, consumer) { 1830 if (g_mirror_is_busy(sc, cp)) 
1831 return (0); 1832 } 1833 G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.", 1834 sc->sc_name); 1835 return (1); 1836} 1837 1838static int 1839g_mirror_try_destroy(struct g_mirror_softc *sc) 1840{ 1841 1842 if (sc->sc_rootmount != NULL) { 1843 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 1844 sc->sc_rootmount); 1845 root_mount_rel(sc->sc_rootmount); 1846 sc->sc_rootmount = NULL; 1847 } 1848 g_topology_lock(); 1849 if (!g_mirror_can_destroy(sc)) { 1850 g_topology_unlock(); 1851 return (0); 1852 } 1853 sc->sc_geom->softc = NULL; 1854 sc->sc_sync.ds_geom->softc = NULL; 1855 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DRAIN) != 0) { 1856 g_topology_unlock(); 1857 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, 1858 &sc->sc_worker); 1859 /* Unlock sc_lock here, as it can be destroyed after wakeup. */ 1860 sx_xunlock(&sc->sc_lock); 1861 wakeup(&sc->sc_worker); 1862 sc->sc_worker = NULL; 1863 } else { 1864 g_topology_unlock(); 1865 g_mirror_destroy_device(sc); 1866 } 1867 return (1); 1868} 1869 1870/* 1871 * Worker thread. 1872 */ 1873static void 1874g_mirror_worker(void *arg) 1875{ 1876 struct g_mirror_softc *sc; 1877 struct g_mirror_event *ep; 1878 struct bio *bp; 1879 int timeout; 1880 1881 sc = arg; 1882 thread_lock(curthread); 1883 sched_prio(curthread, PRIBIO); 1884 thread_unlock(curthread); 1885 1886 sx_xlock(&sc->sc_lock); 1887 for (;;) { 1888 G_MIRROR_DEBUG(5, "%s: Let's see...", __func__); 1889 /* 1890 * First take a look at events. 1891 * This is important to handle events before any I/O requests. 1892 */ 1893 ep = g_mirror_event_first(sc); 1894 if (ep != NULL) { 1895 g_mirror_event_remove(sc, ep); 1896 if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) { 1897 /* Update only device status. */ 1898 G_MIRROR_DEBUG(3, 1899 "Running event for device %s.", 1900 sc->sc_name); 1901 ep->e_error = 0; 1902 g_mirror_update_device(sc, true); 1903 } else { 1904 /* Update disk status. */ 1905 G_MIRROR_DEBUG(3, "Running event for disk %s.", 1906 g_mirror_get_diskname(ep->e_disk)); 1907 ep->e_error = g_mirror_update_disk(ep->e_disk, 1908 ep->e_state); 1909 if (ep->e_error == 0) 1910 g_mirror_update_device(sc, false); 1911 } 1912 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) { 1913 KASSERT(ep->e_error == 0, 1914 ("Error cannot be handled.")); 1915 g_mirror_event_free(ep); 1916 } else { 1917 ep->e_flags |= G_MIRROR_EVENT_DONE; 1918 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, 1919 ep); 1920 mtx_lock(&sc->sc_events_mtx); 1921 wakeup(ep); 1922 mtx_unlock(&sc->sc_events_mtx); 1923 } 1924 if ((sc->sc_flags & 1925 G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1926 if (g_mirror_try_destroy(sc)) { 1927 curthread->td_pflags &= ~TDP_GEOM; 1928 G_MIRROR_DEBUG(1, "Thread exiting."); 1929 kproc_exit(0); 1930 } 1931 } 1932 G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__); 1933 continue; 1934 } 1935 1936 /* 1937 * Check if we can mark array as CLEAN and if we can't take 1938 * how much seconds should we wait. 1939 */ 1940 timeout = g_mirror_idle(sc, -1); 1941 1942 /* 1943 * Handle I/O requests. 
1944 */ 1945 mtx_lock(&sc->sc_queue_mtx); 1946 bp = TAILQ_FIRST(&sc->sc_queue); 1947 if (bp != NULL) 1948 TAILQ_REMOVE(&sc->sc_queue, bp, bio_queue); 1949 else { 1950 if ((sc->sc_flags & 1951 G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1952 mtx_unlock(&sc->sc_queue_mtx); 1953 if (g_mirror_try_destroy(sc)) { 1954 curthread->td_pflags &= ~TDP_GEOM; 1955 G_MIRROR_DEBUG(1, "Thread exiting."); 1956 kproc_exit(0); 1957 } 1958 mtx_lock(&sc->sc_queue_mtx); 1959 if (!TAILQ_EMPTY(&sc->sc_queue)) { 1960 mtx_unlock(&sc->sc_queue_mtx); 1961 continue; 1962 } 1963 } 1964 if (g_mirror_event_first(sc) != NULL) { 1965 mtx_unlock(&sc->sc_queue_mtx); 1966 continue; 1967 } 1968 sx_xunlock(&sc->sc_lock); 1969 MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1", 1970 timeout * hz); 1971 sx_xlock(&sc->sc_lock); 1972 G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__); 1973 continue; 1974 } 1975 mtx_unlock(&sc->sc_queue_mtx); 1976 1977 if (bp->bio_from->geom == sc->sc_sync.ds_geom && 1978 (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) { 1979 /* 1980 * Handle completion of the first half (the read) of a 1981 * block synchronization operation. 1982 */ 1983 g_mirror_sync_request(sc, bp); 1984 } else if (bp->bio_to != sc->sc_provider) { 1985 if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0) 1986 /* 1987 * Handle completion of a regular I/O request. 1988 */ 1989 g_mirror_regular_request(sc, bp); 1990 else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) 1991 /* 1992 * Handle completion of the second half (the 1993 * write) of a block synchronization operation. 1994 */ 1995 g_mirror_sync_request(sc, bp); 1996 else { 1997 KASSERT(0, 1998 ("Invalid request cflags=0x%hx to=%s.", 1999 bp->bio_cflags, bp->bio_to->name)); 2000 } 2001 } else { 2002 /* 2003 * Initiate an I/O request. 2004 */ 2005 g_mirror_register_request(sc, bp); 2006 } 2007 G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__); 2008 } 2009} 2010 2011static void 2012g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk) 2013{ 2014 2015 sx_assert(&sc->sc_lock, SX_LOCKED); 2016 2017 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0) 2018 return; 2019 if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) { 2020 G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.", 2021 g_mirror_get_diskname(disk), sc->sc_name); 2022 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 2023 } else if (sc->sc_idle && 2024 (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 2025 G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.", 2026 g_mirror_get_diskname(disk), sc->sc_name); 2027 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2028 } 2029} 2030 2031static void 2032g_mirror_sync_start(struct g_mirror_disk *disk) 2033{ 2034 struct g_mirror_softc *sc; 2035 struct g_consumer *cp; 2036 struct bio *bp; 2037 int error, i; 2038 2039 g_topology_assert_not(); 2040 sc = disk->d_softc; 2041 sx_assert(&sc->sc_lock, SX_LOCKED); 2042 2043 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2044 ("Disk %s is not marked for synchronization.", 2045 g_mirror_get_diskname(disk))); 2046 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2047 ("Device not in RUNNING state (%s, %u).", sc->sc_name, 2048 sc->sc_state)); 2049 2050 sx_xunlock(&sc->sc_lock); 2051 g_topology_lock(); 2052 cp = g_new_consumer(sc->sc_sync.ds_geom); 2053 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 2054 error = g_attach(cp, sc->sc_provider); 2055 KASSERT(error == 0, 2056 ("Cannot attach to %s (error=%d).", sc->sc_name, error)); 2057 error = g_access(cp, 1, 0, 0); 2058 
KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error)); 2059 g_topology_unlock(); 2060 sx_xlock(&sc->sc_lock); 2061 2062 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name, 2063 g_mirror_get_diskname(disk)); 2064 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0) 2065 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 2066 KASSERT(disk->d_sync.ds_consumer == NULL, 2067 ("Sync consumer already exists (device=%s, disk=%s).", 2068 sc->sc_name, g_mirror_get_diskname(disk))); 2069 2070 disk->d_sync.ds_consumer = cp; 2071 disk->d_sync.ds_consumer->private = disk; 2072 disk->d_sync.ds_consumer->index = 0; 2073 2074 /* 2075 * Allocate memory for synchronization bios and initialize them. 2076 */ 2077 disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs, 2078 M_MIRROR, M_WAITOK); 2079 for (i = 0; i < g_mirror_syncreqs; i++) { 2080 bp = g_alloc_bio(); 2081 disk->d_sync.ds_bios[i] = bp; 2082 bp->bio_parent = NULL; 2083 bp->bio_cmd = BIO_READ; 2084 bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK); 2085 bp->bio_cflags = 0; 2086 bp->bio_offset = disk->d_sync.ds_offset; 2087 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); 2088 disk->d_sync.ds_offset += bp->bio_length; 2089 bp->bio_done = g_mirror_sync_done; 2090 bp->bio_from = disk->d_sync.ds_consumer; 2091 bp->bio_to = sc->sc_provider; 2092 bp->bio_caller1 = (void *)(uintptr_t)i; 2093 } 2094 2095 /* Increase the number of disks in SYNCHRONIZING state. */ 2096 sc->sc_sync.ds_ndisks++; 2097 /* Set the number of in-flight synchronization requests. */ 2098 disk->d_sync.ds_inflight = g_mirror_syncreqs; 2099 2100 /* 2101 * Fire off first synchronization requests. 2102 */ 2103 for (i = 0; i < g_mirror_syncreqs; i++) { 2104 bp = disk->d_sync.ds_bios[i]; 2105 G_MIRROR_LOGREQ(3, bp, "Sending synchronization request."); 2106 disk->d_sync.ds_consumer->index++; 2107 /* 2108 * Delay the request if it is colliding with a regular request. 2109 */ 2110 if (g_mirror_regular_collision(sc, bp)) 2111 g_mirror_sync_delay(sc, bp); 2112 else 2113 g_io_request(bp, disk->d_sync.ds_consumer); 2114 } 2115} 2116 2117/* 2118 * Stop synchronization process. 2119 * type: 0 - synchronization finished 2120 * 1 - synchronization stopped 2121 */ 2122static void 2123g_mirror_sync_stop(struct g_mirror_disk *disk, int type) 2124{ 2125 struct g_mirror_softc *sc; 2126 struct g_consumer *cp; 2127 2128 g_topology_assert_not(); 2129 sc = disk->d_softc; 2130 sx_assert(&sc->sc_lock, SX_LOCKED); 2131 2132 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2133 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2134 g_mirror_disk_state2str(disk->d_state))); 2135 if (disk->d_sync.ds_consumer == NULL) 2136 return; 2137 2138 if (type == 0) { 2139 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.", 2140 sc->sc_name, g_mirror_get_diskname(disk)); 2141 } else /* if (type == 1) */ { 2142 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.", 2143 sc->sc_name, g_mirror_get_diskname(disk)); 2144 } 2145 g_mirror_regular_release(sc); 2146 free(disk->d_sync.ds_bios, M_MIRROR); 2147 disk->d_sync.ds_bios = NULL; 2148 cp = disk->d_sync.ds_consumer; 2149 disk->d_sync.ds_consumer = NULL; 2150 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2151 sc->sc_sync.ds_ndisks--; 2152 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. 
*/ 2153 g_topology_lock(); 2154 g_mirror_kill_consumer(sc, cp); 2155 g_topology_unlock(); 2156 sx_xlock(&sc->sc_lock); 2157} 2158 2159static void 2160g_mirror_launch_provider(struct g_mirror_softc *sc) 2161{ 2162 struct g_mirror_disk *disk; 2163 struct g_provider *pp, *dp; 2164 2165 sx_assert(&sc->sc_lock, SX_LOCKED); 2166 2167 g_topology_lock(); 2168 pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name); 2169 pp->flags |= G_PF_DIRECT_RECEIVE; 2170 pp->mediasize = sc->sc_mediasize; 2171 pp->sectorsize = sc->sc_sectorsize; 2172 pp->stripesize = 0; 2173 pp->stripeoffset = 0; 2174 2175 /* Splitting of unmapped BIO's could work but isn't implemented now */ 2176 if (sc->sc_balance != G_MIRROR_BALANCE_SPLIT) 2177 pp->flags |= G_PF_ACCEPT_UNMAPPED; 2178 2179 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2180 if (disk->d_consumer && disk->d_consumer->provider) { 2181 dp = disk->d_consumer->provider; 2182 if (dp->stripesize > pp->stripesize) { 2183 pp->stripesize = dp->stripesize; 2184 pp->stripeoffset = dp->stripeoffset; 2185 } 2186 /* A provider underneath us doesn't support unmapped */ 2187 if ((dp->flags & G_PF_ACCEPT_UNMAPPED) == 0) { 2188 G_MIRROR_DEBUG(0, "Cancelling unmapped " 2189 "because of %s.", dp->name); 2190 pp->flags &= ~G_PF_ACCEPT_UNMAPPED; 2191 } 2192 } 2193 } 2194 pp->private = sc; 2195 sc->sc_refcnt++; 2196 sc->sc_provider = pp; 2197 g_error_provider(pp, 0); 2198 g_topology_unlock(); 2199 G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name, 2200 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks); 2201 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2202 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2203 g_mirror_sync_start(disk); 2204 } 2205} 2206 2207static void 2208g_mirror_destroy_provider(struct g_mirror_softc *sc) 2209{ 2210 struct g_mirror_disk *disk; 2211 struct bio *bp; 2212 2213 g_topology_assert_not(); 2214 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).", 2215 sc->sc_name)); 2216 2217 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2218 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2219 g_mirror_sync_stop(disk, 1); 2220 } 2221 2222 g_topology_lock(); 2223 g_error_provider(sc->sc_provider, ENXIO); 2224 mtx_lock(&sc->sc_queue_mtx); 2225 while ((bp = TAILQ_FIRST(&sc->sc_queue)) != NULL) { 2226 TAILQ_REMOVE(&sc->sc_queue, bp, bio_queue); 2227 /* 2228 * Abort any pending I/O that wasn't generated by us. 2229 * Synchronization requests and requests destined for individual 2230 * mirror components can be destroyed immediately. 
2231 		 */
2232 		if (bp->bio_to == sc->sc_provider &&
2233 		    bp->bio_from->geom != sc->sc_sync.ds_geom) {
2234 			g_io_deliver(bp, ENXIO);
2235 		} else {
2236 			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
2237 				free(bp->bio_data, M_MIRROR);
2238 			g_destroy_bio(bp);
2239 		}
2240 	}
2241 	mtx_unlock(&sc->sc_queue_mtx);
2242 	g_wither_provider(sc->sc_provider, ENXIO);
2243 	sc->sc_provider = NULL;
2244 	G_MIRROR_DEBUG(0, "Device %s: provider destroyed.", sc->sc_name);
2245 	g_topology_unlock();
2246 }
2247 
2248 static void
2249 g_mirror_go(void *arg)
2250 {
2251 	struct g_mirror_softc *sc;
2252 
2253 	sc = arg;
2254 	G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
2255 	g_mirror_event_send(sc, 0,
2256 	    G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE);
2257 }
2258 
2259 static u_int
2260 g_mirror_determine_state(struct g_mirror_disk *disk)
2261 {
2262 	struct g_mirror_softc *sc;
2263 	u_int state;
2264 
2265 	sc = disk->d_softc;
2266 	if (sc->sc_syncid == disk->d_sync.ds_syncid) {
2267 		if ((disk->d_flags &
2268 		    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0 &&
2269 		    (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 ||
2270 		    (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0)) {
2271 			/* Disk does not need synchronization. */
2272 			state = G_MIRROR_DISK_STATE_ACTIVE;
2273 		} else {
2274 			if ((sc->sc_flags &
2275 			    G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2276 			    (disk->d_flags &
2277 			    G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
2278 				/*
2279 				 * We can start synchronization from
2280 				 * the stored offset.
2281 				 */
2282 				state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
2283 			} else {
2284 				state = G_MIRROR_DISK_STATE_STALE;
2285 			}
2286 		}
2287 	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
2288 		/*
2289 		 * Reset all synchronization data for this disk, because
2290 		 * even if it was synchronized, it was synchronized against
2291 		 * disks with a different syncid.
2292 		 */
2293 		disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
2294 		disk->d_sync.ds_offset = 0;
2295 		disk->d_sync.ds_offset_done = 0;
2296 		disk->d_sync.ds_syncid = sc->sc_syncid;
2297 		if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2298 		    (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
2299 			state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
2300 		} else {
2301 			state = G_MIRROR_DISK_STATE_STALE;
2302 		}
2303 	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
2304 		/*
2305 		 * Not good, NOT GOOD!
2306 		 * It means that the mirror was started on stale disks
2307 		 * and a fresher disk has just arrived.
2308 		 * If there were any writes, the mirror is broken, sorry.
2309 		 * The best choice here is to leave this disk untouched
2310 		 * and inform the user loudly.
2311 		 */
2312 		G_MIRROR_DEBUG(0, "Device %s was started before the freshest "
2313 		    "disk (%s) arrived! It will not be connected to the "
2314 		    "running device.", sc->sc_name,
2315 		    g_mirror_get_diskname(disk));
2316 		g_mirror_destroy_disk(disk);
2317 		state = G_MIRROR_DISK_STATE_NONE;
2318 		/* Return immediately, because the disk was destroyed. */
2319 		return (state);
2320 	}
2321 	G_MIRROR_DEBUG(3, "State for %s disk: %s.",
2322 	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
2323 	return (state);
2324 }
2325 
2326 /*
2327  * Update device state.
2328 */ 2329static void 2330g_mirror_update_device(struct g_mirror_softc *sc, bool force) 2331{ 2332 struct g_mirror_disk *disk; 2333 u_int state; 2334 2335 sx_assert(&sc->sc_lock, SX_XLOCKED); 2336 2337 switch (sc->sc_state) { 2338 case G_MIRROR_DEVICE_STATE_STARTING: 2339 { 2340 struct g_mirror_disk *pdisk, *tdisk; 2341 u_int dirty, ndisks, genid, syncid; 2342 bool broken; 2343 2344 KASSERT(sc->sc_provider == NULL, 2345 ("Non-NULL provider in STARTING state (%s).", sc->sc_name)); 2346 /* 2347 * Are we ready? We are, if all disks are connected or 2348 * if we have any disks and 'force' is true. 2349 */ 2350 ndisks = g_mirror_ndisks(sc, -1); 2351 if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) { 2352 ; 2353 } else if (ndisks == 0) { 2354 /* 2355 * Disks went down in starting phase, so destroy 2356 * device. 2357 */ 2358 callout_drain(&sc->sc_callout); 2359 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2360 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 2361 sc->sc_rootmount); 2362 root_mount_rel(sc->sc_rootmount); 2363 sc->sc_rootmount = NULL; 2364 return; 2365 } else { 2366 return; 2367 } 2368 2369 /* 2370 * Activate all disks with the biggest syncid. 2371 */ 2372 if (force) { 2373 /* 2374 * If 'force' is true, we have been called due to 2375 * timeout, so don't bother canceling timeout. 2376 */ 2377 ndisks = 0; 2378 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2379 if ((disk->d_flags & 2380 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { 2381 ndisks++; 2382 } 2383 } 2384 if (ndisks == 0) { 2385 /* No valid disks found, destroy device. */ 2386 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2387 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2388 __LINE__, sc->sc_rootmount); 2389 root_mount_rel(sc->sc_rootmount); 2390 sc->sc_rootmount = NULL; 2391 return; 2392 } 2393 } else { 2394 /* Cancel timeout. */ 2395 callout_drain(&sc->sc_callout); 2396 } 2397 2398 /* 2399 * Find the biggest genid. 2400 */ 2401 genid = 0; 2402 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2403 if (disk->d_genid > genid) 2404 genid = disk->d_genid; 2405 } 2406 sc->sc_genid = genid; 2407 /* 2408 * Remove all disks without the biggest genid. 2409 */ 2410 broken = false; 2411 LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) { 2412 if (disk->d_genid < genid) { 2413 G_MIRROR_DEBUG(0, 2414 "Component %s (device %s) broken, skipping.", 2415 g_mirror_get_diskname(disk), sc->sc_name); 2416 g_mirror_destroy_disk(disk); 2417 /* 2418 * Bump the syncid in case we discover a healthy 2419 * replacement disk after starting the mirror. 2420 */ 2421 broken = true; 2422 } 2423 } 2424 2425 /* 2426 * Find the biggest syncid. 2427 */ 2428 syncid = 0; 2429 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2430 if (disk->d_sync.ds_syncid > syncid) 2431 syncid = disk->d_sync.ds_syncid; 2432 } 2433 2434 /* 2435 * Here we need to look for dirty disks and if all disks 2436 * with the biggest syncid are dirty, we have to choose 2437 * one with the biggest priority and rebuild the rest. 2438 */ 2439 /* 2440 * Find the number of dirty disks with the biggest syncid. 2441 * Find the number of disks with the biggest syncid. 2442 * While here, find a disk with the biggest priority. 
2443 */ 2444 dirty = ndisks = 0; 2445 pdisk = NULL; 2446 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2447 if (disk->d_sync.ds_syncid != syncid) 2448 continue; 2449 if ((disk->d_flags & 2450 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2451 continue; 2452 } 2453 ndisks++; 2454 if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 2455 dirty++; 2456 if (pdisk == NULL || 2457 pdisk->d_priority < disk->d_priority) { 2458 pdisk = disk; 2459 } 2460 } 2461 } 2462 if (dirty == 0) { 2463 /* No dirty disks at all, great. */ 2464 } else if (dirty == ndisks) { 2465 /* 2466 * Force synchronization for all dirty disks except one 2467 * with the biggest priority. 2468 */ 2469 KASSERT(pdisk != NULL, ("pdisk == NULL")); 2470 G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a " 2471 "master disk for synchronization.", 2472 g_mirror_get_diskname(pdisk), sc->sc_name); 2473 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2474 if (disk->d_sync.ds_syncid != syncid) 2475 continue; 2476 if ((disk->d_flags & 2477 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2478 continue; 2479 } 2480 KASSERT((disk->d_flags & 2481 G_MIRROR_DISK_FLAG_DIRTY) != 0, 2482 ("Disk %s isn't marked as dirty.", 2483 g_mirror_get_diskname(disk))); 2484 /* Skip the disk with the biggest priority. */ 2485 if (disk == pdisk) 2486 continue; 2487 disk->d_sync.ds_syncid = 0; 2488 } 2489 } else if (dirty < ndisks) { 2490 /* 2491 * Force synchronization for all dirty disks. 2492 * We have some non-dirty disks. 2493 */ 2494 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2495 if (disk->d_sync.ds_syncid != syncid) 2496 continue; 2497 if ((disk->d_flags & 2498 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2499 continue; 2500 } 2501 if ((disk->d_flags & 2502 G_MIRROR_DISK_FLAG_DIRTY) == 0) { 2503 continue; 2504 } 2505 disk->d_sync.ds_syncid = 0; 2506 } 2507 } 2508 2509 /* Reset hint. */ 2510 sc->sc_hint = NULL; 2511 sc->sc_syncid = syncid; 2512 if (force || broken) { 2513 /* Remember to bump syncid on first write. */ 2514 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2515 } 2516 state = G_MIRROR_DEVICE_STATE_RUNNING; 2517 G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.", 2518 sc->sc_name, g_mirror_device_state2str(sc->sc_state), 2519 g_mirror_device_state2str(state)); 2520 sc->sc_state = state; 2521 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2522 state = g_mirror_determine_state(disk); 2523 g_mirror_event_send(disk, state, 2524 G_MIRROR_EVENT_DONTWAIT); 2525 if (state == G_MIRROR_DISK_STATE_STALE) 2526 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2527 } 2528 break; 2529 } 2530 case G_MIRROR_DEVICE_STATE_RUNNING: 2531 if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 && 2532 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) { 2533 /* 2534 * No usable disks, so destroy the device. 2535 */ 2536 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2537 break; 2538 } else if (g_mirror_ndisks(sc, 2539 G_MIRROR_DISK_STATE_ACTIVE) > 0 && 2540 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) { 2541 /* 2542 * We have active disks, launch provider if it doesn't 2543 * exist. 2544 */ 2545 if (sc->sc_provider == NULL) 2546 g_mirror_launch_provider(sc); 2547 if (sc->sc_rootmount != NULL) { 2548 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2549 __LINE__, sc->sc_rootmount); 2550 root_mount_rel(sc->sc_rootmount); 2551 sc->sc_rootmount = NULL; 2552 } 2553 } 2554 /* 2555 * Genid should be bumped immediately, so do it here. 
2556 */ 2557 if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) { 2558 sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID; 2559 g_mirror_bump_genid(sc); 2560 } 2561 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID_NOW) != 0) { 2562 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID_NOW; 2563 g_mirror_bump_syncid(sc); 2564 } 2565 break; 2566 default: 2567 KASSERT(1 == 0, ("Wrong device state (%s, %s).", 2568 sc->sc_name, g_mirror_device_state2str(sc->sc_state))); 2569 break; 2570 } 2571} 2572 2573/* 2574 * Update disk state and device state if needed. 2575 */ 2576#define DISK_STATE_CHANGED() G_MIRROR_DEBUG(1, \ 2577 "Disk %s state changed from %s to %s (device %s).", \ 2578 g_mirror_get_diskname(disk), \ 2579 g_mirror_disk_state2str(disk->d_state), \ 2580 g_mirror_disk_state2str(state), sc->sc_name) 2581static int 2582g_mirror_update_disk(struct g_mirror_disk *disk, u_int state) 2583{ 2584 struct g_mirror_softc *sc; 2585 2586 sc = disk->d_softc; 2587 sx_assert(&sc->sc_lock, SX_XLOCKED); 2588 2589again: 2590 G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.", 2591 g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state), 2592 g_mirror_disk_state2str(state)); 2593 switch (state) { 2594 case G_MIRROR_DISK_STATE_NEW: 2595 /* 2596 * Possible scenarios: 2597 * 1. New disk arrive. 2598 */ 2599 /* Previous state should be NONE. */ 2600 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE, 2601 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2602 g_mirror_disk_state2str(disk->d_state))); 2603 DISK_STATE_CHANGED(); 2604 2605 disk->d_state = state; 2606 if (LIST_EMPTY(&sc->sc_disks)) 2607 LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next); 2608 else { 2609 struct g_mirror_disk *dp; 2610 2611 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 2612 if (disk->d_priority >= dp->d_priority) { 2613 LIST_INSERT_BEFORE(dp, disk, d_next); 2614 dp = NULL; 2615 break; 2616 } 2617 if (LIST_NEXT(dp, d_next) == NULL) 2618 break; 2619 } 2620 if (dp != NULL) 2621 LIST_INSERT_AFTER(dp, disk, d_next); 2622 } 2623 G_MIRROR_DEBUG(1, "Device %s: provider %s detected.", 2624 sc->sc_name, g_mirror_get_diskname(disk)); 2625 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 2626 break; 2627 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2628 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2629 g_mirror_device_state2str(sc->sc_state), 2630 g_mirror_get_diskname(disk), 2631 g_mirror_disk_state2str(disk->d_state))); 2632 state = g_mirror_determine_state(disk); 2633 if (state != G_MIRROR_DISK_STATE_NONE) 2634 goto again; 2635 break; 2636 case G_MIRROR_DISK_STATE_ACTIVE: 2637 /* 2638 * Possible scenarios: 2639 * 1. New disk does not need synchronization. 2640 * 2. Synchronization process finished successfully. 2641 */ 2642 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2643 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2644 g_mirror_device_state2str(sc->sc_state), 2645 g_mirror_get_diskname(disk), 2646 g_mirror_disk_state2str(disk->d_state))); 2647 /* Previous state should be NEW or SYNCHRONIZING. 
 */
2648 		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW ||
2649 		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2650 		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2651 		    g_mirror_disk_state2str(disk->d_state)));
2652 		DISK_STATE_CHANGED();
2653 
2654 		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
2655 			disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING;
2656 			disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC;
2657 			g_mirror_sync_stop(disk, 0);
2658 		}
2659 		disk->d_state = state;
2660 		disk->d_sync.ds_offset = 0;
2661 		disk->d_sync.ds_offset_done = 0;
2662 		g_mirror_update_idle(sc, disk);
2663 		g_mirror_update_metadata(disk);
2664 		G_MIRROR_DEBUG(1, "Device %s: provider %s activated.",
2665 		    sc->sc_name, g_mirror_get_diskname(disk));
2666 		break;
2667 	case G_MIRROR_DISK_STATE_STALE:
2668 		/*
2669 		 * Possible scenarios:
2670 		 * 1. A stale disk was connected.
2671 		 */
2672 		/* Previous state should be NEW. */
2673 		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2674 		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2675 		    g_mirror_disk_state2str(disk->d_state)));
2676 		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2677 		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2678 		    g_mirror_device_state2str(sc->sc_state),
2679 		    g_mirror_get_diskname(disk),
2680 		    g_mirror_disk_state2str(disk->d_state)));
2681 		/*
2682 		 * The STALE state is only possible when the device is
2683 		 * marked NOAUTOSYNC.
2684 		 */
2685 		KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
2686 		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2687 		    g_mirror_device_state2str(sc->sc_state),
2688 		    g_mirror_get_diskname(disk),
2689 		    g_mirror_disk_state2str(disk->d_state)));
2690 		DISK_STATE_CHANGED();
2691 
2692 		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2693 		disk->d_state = state;
2694 		g_mirror_update_metadata(disk);
2695 		G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
2696 		    sc->sc_name, g_mirror_get_diskname(disk));
2697 		break;
2698 	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
2699 		/*
2700 		 * Possible scenarios:
2701 		 * 1. A disk which needs synchronization was connected.
2702 		 */
2703 		/* Previous state should be NEW. */
2704 		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2705 		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2706 		    g_mirror_disk_state2str(disk->d_state)));
2707 		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2708 		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2709 		    g_mirror_device_state2str(sc->sc_state),
2710 		    g_mirror_get_diskname(disk),
2711 		    g_mirror_disk_state2str(disk->d_state)));
2712 		DISK_STATE_CHANGED();
2713 
2714 		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
2715 			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2716 		disk->d_state = state;
2717 		if (sc->sc_provider != NULL) {
2718 			g_mirror_sync_start(disk);
2719 			g_mirror_update_metadata(disk);
2720 		}
2721 		break;
2722 	case G_MIRROR_DISK_STATE_DISCONNECTED:
2723 		/*
2724 		 * Possible scenarios:
2725 		 * 1. The device wasn't running yet, but a disk disappeared.
2726 		 * 2. A disk was active and disappeared.
2727 		 * 3. A disk disappeared during the synchronization process.
2728 		 */
2729 		if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
2730 			/*
2731 			 * Previous state should be ACTIVE, STALE or
2732 			 * SYNCHRONIZING.
2733 */ 2734 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 2735 disk->d_state == G_MIRROR_DISK_STATE_STALE || 2736 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2737 ("Wrong disk state (%s, %s).", 2738 g_mirror_get_diskname(disk), 2739 g_mirror_disk_state2str(disk->d_state))); 2740 } else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) { 2741 /* Previous state should be NEW. */ 2742 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2743 ("Wrong disk state (%s, %s).", 2744 g_mirror_get_diskname(disk), 2745 g_mirror_disk_state2str(disk->d_state))); 2746 /* 2747 * Reset bumping syncid if disk disappeared in STARTING 2748 * state. 2749 */ 2750 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) 2751 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID; 2752#ifdef INVARIANTS 2753 } else { 2754 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).", 2755 sc->sc_name, 2756 g_mirror_device_state2str(sc->sc_state), 2757 g_mirror_get_diskname(disk), 2758 g_mirror_disk_state2str(disk->d_state))); 2759#endif 2760 } 2761 DISK_STATE_CHANGED(); 2762 G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.", 2763 sc->sc_name, g_mirror_get_diskname(disk)); 2764 2765 g_mirror_destroy_disk(disk); 2766 break; 2767 case G_MIRROR_DISK_STATE_DESTROY: 2768 { 2769 int error; 2770 2771 error = g_mirror_clear_metadata(disk); 2772 if (error != 0) { 2773 G_MIRROR_DEBUG(0, 2774 "Device %s: failed to clear metadata on %s: %d.", 2775 sc->sc_name, g_mirror_get_diskname(disk), error); 2776 break; 2777 } 2778 DISK_STATE_CHANGED(); 2779 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", 2780 sc->sc_name, g_mirror_get_diskname(disk)); 2781 2782 g_mirror_destroy_disk(disk); 2783 sc->sc_ndisks--; 2784 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2785 g_mirror_update_metadata(disk); 2786 } 2787 break; 2788 } 2789 default: 2790 KASSERT(1 == 0, ("Unknown state (%u).", state)); 2791 break; 2792 } 2793 return (0); 2794} 2795#undef DISK_STATE_CHANGED 2796 2797int 2798g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md) 2799{ 2800 struct g_provider *pp; 2801 u_char *buf; 2802 int error; 2803 2804 g_topology_assert(); 2805 2806 error = g_access(cp, 1, 0, 0); 2807 if (error != 0) 2808 return (error); 2809 pp = cp->provider; 2810 g_topology_unlock(); 2811 /* Metadata are stored on last sector. */ 2812 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 2813 &error); 2814 g_topology_lock(); 2815 g_access(cp, -1, 0, 0); 2816 if (buf == NULL) { 2817 G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).", 2818 cp->provider->name, error); 2819 return (error); 2820 } 2821 2822 /* Decode metadata. 
*/ 2823 error = mirror_metadata_decode(buf, md); 2824 g_free(buf); 2825 if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0) 2826 return (EINVAL); 2827 if (md->md_version > G_MIRROR_VERSION) { 2828 G_MIRROR_DEBUG(0, 2829 "Kernel module is too old to handle metadata from %s.", 2830 cp->provider->name); 2831 return (EINVAL); 2832 } 2833 if (error != 0) { 2834 G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 2835 cp->provider->name); 2836 return (error); 2837 } 2838 2839 return (0); 2840} 2841 2842static int 2843g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp, 2844 struct g_mirror_metadata *md) 2845{ 2846 2847 if (g_mirror_id2disk(sc, md->md_did) != NULL) { 2848 G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.", 2849 pp->name, md->md_did); 2850 return (EEXIST); 2851 } 2852 if (md->md_all != sc->sc_ndisks) { 2853 G_MIRROR_DEBUG(1, 2854 "Invalid '%s' field on disk %s (device %s), skipping.", 2855 "md_all", pp->name, sc->sc_name); 2856 return (EINVAL); 2857 } 2858 if (md->md_slice != sc->sc_slice) { 2859 G_MIRROR_DEBUG(1, 2860 "Invalid '%s' field on disk %s (device %s), skipping.", 2861 "md_slice", pp->name, sc->sc_name); 2862 return (EINVAL); 2863 } 2864 if (md->md_balance != sc->sc_balance) { 2865 G_MIRROR_DEBUG(1, 2866 "Invalid '%s' field on disk %s (device %s), skipping.", 2867 "md_balance", pp->name, sc->sc_name); 2868 return (EINVAL); 2869 } 2870#if 0 2871 if (md->md_mediasize != sc->sc_mediasize) { 2872 G_MIRROR_DEBUG(1, 2873 "Invalid '%s' field on disk %s (device %s), skipping.", 2874 "md_mediasize", pp->name, sc->sc_name); 2875 return (EINVAL); 2876 } 2877#endif 2878 if (sc->sc_mediasize > pp->mediasize) { 2879 G_MIRROR_DEBUG(1, 2880 "Invalid size of disk %s (device %s), skipping.", pp->name, 2881 sc->sc_name); 2882 return (EINVAL); 2883 } 2884 if (md->md_sectorsize != sc->sc_sectorsize) { 2885 G_MIRROR_DEBUG(1, 2886 "Invalid '%s' field on disk %s (device %s), skipping.", 2887 "md_sectorsize", pp->name, sc->sc_name); 2888 return (EINVAL); 2889 } 2890 if ((sc->sc_sectorsize % pp->sectorsize) != 0) { 2891 G_MIRROR_DEBUG(1, 2892 "Invalid sector size of disk %s (device %s), skipping.", 2893 pp->name, sc->sc_name); 2894 return (EINVAL); 2895 } 2896 if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) { 2897 G_MIRROR_DEBUG(1, 2898 "Invalid device flags on disk %s (device %s), skipping.", 2899 pp->name, sc->sc_name); 2900 return (EINVAL); 2901 } 2902 if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) { 2903 G_MIRROR_DEBUG(1, 2904 "Invalid disk flags on disk %s (device %s), skipping.", 2905 pp->name, sc->sc_name); 2906 return (EINVAL); 2907 } 2908 return (0); 2909} 2910 2911int 2912g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp, 2913 struct g_mirror_metadata *md) 2914{ 2915 struct g_mirror_disk *disk; 2916 int error; 2917 2918 g_topology_assert_not(); 2919 G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name); 2920 2921 error = g_mirror_check_metadata(sc, pp, md); 2922 if (error != 0) 2923 return (error); 2924 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING && 2925 md->md_genid < sc->sc_genid) { 2926 G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.", 2927 pp->name, sc->sc_name); 2928 return (EINVAL); 2929 } 2930 disk = g_mirror_init_disk(sc, pp, md, &error); 2931 if (disk == NULL) 2932 return (error); 2933 error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW, 2934 G_MIRROR_EVENT_WAIT); 2935 if (error != 0) 2936 return (error); 2937 if (md->md_version < G_MIRROR_VERSION) { 2938 G_MIRROR_DEBUG(0, "Upgrading 
metadata on %s (v%d->v%d).", 2939 pp->name, md->md_version, G_MIRROR_VERSION); 2940 g_mirror_update_metadata(disk); 2941 } 2942 return (0); 2943} 2944 2945static void 2946g_mirror_destroy_delayed(void *arg, int flag) 2947{ 2948 struct g_mirror_softc *sc; 2949 int error; 2950 2951 if (flag == EV_CANCEL) { 2952 G_MIRROR_DEBUG(1, "Destroying canceled."); 2953 return; 2954 } 2955 sc = arg; 2956 g_topology_unlock(); 2957 sx_xlock(&sc->sc_lock); 2958 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0, 2959 ("DESTROY flag set on %s.", sc->sc_name)); 2960 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0, 2961 ("CLOSEWAIT flag not set on %s.", sc->sc_name)); 2962 G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name); 2963 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT); 2964 if (error != 0) { 2965 G_MIRROR_DEBUG(0, "Cannot destroy %s (error=%d).", 2966 sc->sc_name, error); 2967 sx_xunlock(&sc->sc_lock); 2968 } 2969 g_topology_lock(); 2970} 2971 2972static int 2973g_mirror_access(struct g_provider *pp, int acr, int acw, int ace) 2974{ 2975 struct g_mirror_softc *sc; 2976 int error = 0; 2977 2978 g_topology_assert(); 2979 G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 2980 acw, ace); 2981 2982 sc = pp->private; 2983 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 2984 2985 g_topology_unlock(); 2986 sx_xlock(&sc->sc_lock); 2987 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 || 2988 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0 || 2989 LIST_EMPTY(&sc->sc_disks)) { 2990 if (acr > 0 || acw > 0 || ace > 0) 2991 error = ENXIO; 2992 goto end; 2993 } 2994 sc->sc_provider_open += acr + acw + ace; 2995 if (pp->acw + acw == 0) 2996 g_mirror_idle(sc, 0); 2997 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0 && 2998 sc->sc_provider_open == 0) 2999 g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK, sc, NULL); 3000end: 3001 sx_xunlock(&sc->sc_lock); 3002 g_topology_lock(); 3003 return (error); 3004} 3005 3006struct g_geom * 3007g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md, 3008 u_int type) 3009{ 3010 struct g_mirror_softc *sc; 3011 struct g_geom *gp; 3012 int error, timeout; 3013 3014 g_topology_assert(); 3015 G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name, 3016 md->md_mid); 3017 3018 /* One disk is minimum. */ 3019 if (md->md_all < 1) 3020 return (NULL); 3021 /* 3022 * Action geom. 
3023 */ 3024 gp = g_new_geomf(mp, "%s", md->md_name); 3025 sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO); 3026 gp->start = g_mirror_start; 3027 gp->orphan = g_mirror_orphan; 3028 gp->access = g_mirror_access; 3029 gp->dumpconf = g_mirror_dumpconf; 3030 3031 sc->sc_type = type; 3032 sc->sc_id = md->md_mid; 3033 sc->sc_slice = md->md_slice; 3034 sc->sc_balance = md->md_balance; 3035 sc->sc_mediasize = md->md_mediasize; 3036 sc->sc_sectorsize = md->md_sectorsize; 3037 sc->sc_ndisks = md->md_all; 3038 sc->sc_flags = md->md_mflags; 3039 sc->sc_bump_id = 0; 3040 sc->sc_idle = 1; 3041 sc->sc_last_write = time_uptime; 3042 sc->sc_writes = 0; 3043 sc->sc_refcnt = 1; 3044 sx_init(&sc->sc_lock, "gmirror:lock"); 3045 TAILQ_INIT(&sc->sc_queue); 3046 mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF); 3047 TAILQ_INIT(&sc->sc_regular_delayed); 3048 TAILQ_INIT(&sc->sc_inflight); 3049 TAILQ_INIT(&sc->sc_sync_delayed); 3050 LIST_INIT(&sc->sc_disks); 3051 TAILQ_INIT(&sc->sc_events); 3052 mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF); 3053 callout_init(&sc->sc_callout, 1); 3054 mtx_init(&sc->sc_done_mtx, "gmirror:done", NULL, MTX_DEF); 3055 sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING; 3056 gp->softc = sc; 3057 sc->sc_geom = gp; 3058 sc->sc_provider = NULL; 3059 sc->sc_provider_open = 0; 3060 /* 3061 * Synchronization geom. 3062 */ 3063 gp = g_new_geomf(mp, "%s.sync", md->md_name); 3064 gp->softc = sc; 3065 gp->orphan = g_mirror_orphan; 3066 sc->sc_sync.ds_geom = gp; 3067 sc->sc_sync.ds_ndisks = 0; 3068 error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0, 3069 "g_mirror %s", md->md_name); 3070 if (error != 0) { 3071 G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.", 3072 sc->sc_name); 3073 g_destroy_geom(sc->sc_sync.ds_geom); 3074 g_destroy_geom(sc->sc_geom); 3075 g_mirror_free_device(sc); 3076 return (NULL); 3077 } 3078 3079 G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).", 3080 sc->sc_name, sc->sc_ndisks, sc->sc_id); 3081 3082 sc->sc_rootmount = root_mount_hold("GMIRROR"); 3083 G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 3084 /* 3085 * Run timeout. 
3086 */ 3087 timeout = g_mirror_timeout * hz; 3088 callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc); 3089 return (sc->sc_geom); 3090} 3091 3092int 3093g_mirror_destroy(struct g_mirror_softc *sc, int how) 3094{ 3095 struct g_mirror_disk *disk; 3096 3097 g_topology_assert_not(); 3098 sx_assert(&sc->sc_lock, SX_XLOCKED); 3099 3100 if (sc->sc_provider_open != 0) { 3101 switch (how) { 3102 case G_MIRROR_DESTROY_SOFT: 3103 G_MIRROR_DEBUG(1, 3104 "Device %s is still open (%d).", sc->sc_name, 3105 sc->sc_provider_open); 3106 return (EBUSY); 3107 case G_MIRROR_DESTROY_DELAYED: 3108 G_MIRROR_DEBUG(1, 3109 "Device %s will be destroyed on last close.", 3110 sc->sc_name); 3111 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 3112 if (disk->d_state == 3113 G_MIRROR_DISK_STATE_SYNCHRONIZING) { 3114 g_mirror_sync_stop(disk, 1); 3115 } 3116 } 3117 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_CLOSEWAIT; 3118 return (EBUSY); 3119 case G_MIRROR_DESTROY_HARD: 3120 G_MIRROR_DEBUG(1, "Device %s is still open, so it " 3121 "can't be definitely removed.", sc->sc_name); 3122 } 3123 } 3124 3125 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 3126 sx_xunlock(&sc->sc_lock); 3127 return (0); 3128 } 3129 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 3130 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DRAIN; 3131 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 3132 sx_xunlock(&sc->sc_lock); 3133 mtx_lock(&sc->sc_queue_mtx); 3134 wakeup(sc); 3135 mtx_unlock(&sc->sc_queue_mtx); 3136 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker); 3137 while (sc->sc_worker != NULL) 3138 tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5); 3139 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker); 3140 sx_xlock(&sc->sc_lock); 3141 g_mirror_destroy_device(sc); 3142 return (0); 3143} 3144 3145static void 3146g_mirror_taste_orphan(struct g_consumer *cp) 3147{ 3148 3149 KASSERT(1 == 0, ("%s called while tasting %s.", __func__, 3150 cp->provider->name)); 3151} 3152 3153static struct g_geom * 3154g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 3155{ 3156 struct g_mirror_metadata md; 3157 struct g_mirror_softc *sc; 3158 struct g_consumer *cp; 3159 struct g_geom *gp; 3160 int error; 3161 3162 g_topology_assert(); 3163 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 3164 G_MIRROR_DEBUG(2, "Tasting %s.", pp->name); 3165 3166 gp = g_new_geomf(mp, "mirror:taste"); 3167 /* 3168 * This orphan function should be never called. 3169 */ 3170 gp->orphan = g_mirror_taste_orphan; 3171 cp = g_new_consumer(gp); 3172 g_attach(cp, pp); 3173 error = g_mirror_read_metadata(cp, &md); 3174 g_detach(cp); 3175 g_destroy_consumer(cp); 3176 g_destroy_geom(gp); 3177 if (error != 0) 3178 return (NULL); 3179 gp = NULL; 3180 3181 if (md.md_provider[0] != '\0' && 3182 !g_compare_names(md.md_provider, pp->name)) 3183 return (NULL); 3184 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize) 3185 return (NULL); 3186 if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) { 3187 G_MIRROR_DEBUG(0, 3188 "Device %s: provider %s marked as inactive, skipping.", 3189 md.md_name, pp->name); 3190 return (NULL); 3191 } 3192 if (g_mirror_debug >= 2) 3193 mirror_metadata_dump(&md); 3194 3195 /* 3196 * Let's check if device already exists. 
3197 */ 3198 sc = NULL; 3199 LIST_FOREACH(gp, &mp->geom, geom) { 3200 sc = gp->softc; 3201 if (sc == NULL) 3202 continue; 3203 if (sc->sc_type != G_MIRROR_TYPE_AUTOMATIC) 3204 continue; 3205 if (sc->sc_sync.ds_geom == gp) 3206 continue; 3207 if (strcmp(md.md_name, sc->sc_name) != 0) 3208 continue; 3209 if (md.md_mid != sc->sc_id) { 3210 G_MIRROR_DEBUG(0, "Device %s already configured.", 3211 sc->sc_name); 3212 return (NULL); 3213 } 3214 break; 3215 } 3216 if (gp == NULL) { 3217 gp = g_mirror_create(mp, &md, G_MIRROR_TYPE_AUTOMATIC); 3218 if (gp == NULL) { 3219 G_MIRROR_DEBUG(0, "Cannot create device %s.", 3220 md.md_name); 3221 return (NULL); 3222 } 3223 sc = gp->softc; 3224 } 3225 G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 3226 g_topology_unlock(); 3227 sx_xlock(&sc->sc_lock); 3228 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_TASTING; 3229 error = g_mirror_add_disk(sc, pp, &md); 3230 if (error != 0) { 3231 G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 3232 pp->name, gp->name, error); 3233 if (LIST_EMPTY(&sc->sc_disks)) { 3234 g_cancel_event(sc); 3235 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3236 g_topology_lock(); 3237 return (NULL); 3238 } 3239 gp = NULL; 3240 } 3241 sc->sc_flags &= ~G_MIRROR_DEVICE_FLAG_TASTING; 3242 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 3243 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3244 g_topology_lock(); 3245 return (NULL); 3246 } 3247 sx_xunlock(&sc->sc_lock); 3248 g_topology_lock(); 3249 return (gp); 3250} 3251 3252static void 3253g_mirror_resize(struct g_consumer *cp) 3254{ 3255 struct g_mirror_disk *disk; 3256 3257 g_topology_assert(); 3258 g_trace(G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name); 3259 3260 disk = cp->private; 3261 if (disk == NULL) 3262 return; 3263 g_topology_unlock(); 3264 g_mirror_update_metadata(disk); 3265 g_topology_lock(); 3266} 3267 3268static int 3269g_mirror_destroy_geom(struct gctl_req *req __unused, 3270 struct g_class *mp __unused, struct g_geom *gp) 3271{ 3272 struct g_mirror_softc *sc; 3273 int error; 3274 3275 g_topology_unlock(); 3276 sc = gp->softc; 3277 sx_xlock(&sc->sc_lock); 3278 g_cancel_event(sc); 3279 error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT); 3280 if (error != 0) 3281 sx_xunlock(&sc->sc_lock); 3282 g_topology_lock(); 3283 return (error); 3284} 3285 3286static void 3287g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 3288 struct g_consumer *cp, struct g_provider *pp) 3289{ 3290 struct g_mirror_softc *sc; 3291 3292 g_topology_assert(); 3293 3294 sc = gp->softc; 3295 if (sc == NULL) 3296 return; 3297 /* Skip synchronization geom. */ 3298 if (gp == sc->sc_sync.ds_geom) 3299 return; 3300 if (pp != NULL) { 3301 /* Nothing here. 
*/ 3302 } else if (cp != NULL) { 3303 struct g_mirror_disk *disk; 3304 3305 disk = cp->private; 3306 if (disk == NULL) 3307 return; 3308 g_topology_unlock(); 3309 sx_xlock(&sc->sc_lock); 3310 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id); 3311 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 3312 sbuf_printf(sb, "%s<Synchronized>", indent); 3313 if (disk->d_sync.ds_offset == 0) 3314 sbuf_printf(sb, "0%%"); 3315 else { 3316 sbuf_printf(sb, "%u%%", 3317 (u_int)((disk->d_sync.ds_offset * 100) / 3318 sc->sc_provider->mediasize)); 3319 } 3320 sbuf_printf(sb, "</Synchronized>\n"); 3321 if (disk->d_sync.ds_offset > 0) { 3322 sbuf_printf(sb, "%s<BytesSynced>%jd" 3323 "</BytesSynced>\n", indent, 3324 (intmax_t)disk->d_sync.ds_offset); 3325 } 3326 } 3327 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, 3328 disk->d_sync.ds_syncid); 3329 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, 3330 disk->d_genid); 3331 sbuf_printf(sb, "%s<Flags>", indent); 3332 if (disk->d_flags == 0) 3333 sbuf_printf(sb, "NONE"); 3334 else { 3335 int first = 1; 3336 3337#define ADD_FLAG(flag, name) do { \ 3338 if ((disk->d_flags & (flag)) != 0) { \ 3339 if (!first) \ 3340 sbuf_printf(sb, ", "); \ 3341 else \ 3342 first = 0; \ 3343 sbuf_printf(sb, name); \ 3344 } \ 3345} while (0) 3346 ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY"); 3347 ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED"); 3348 ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE"); 3349 ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING, 3350 "SYNCHRONIZING"); 3351 ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC"); 3352 ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN"); 3353#undef ADD_FLAG 3354 } 3355 sbuf_printf(sb, "</Flags>\n"); 3356 sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent, 3357 disk->d_priority); 3358 sbuf_printf(sb, "%s<State>%s</State>\n", indent, 3359 g_mirror_disk_state2str(disk->d_state)); 3360 sx_xunlock(&sc->sc_lock); 3361 g_topology_lock(); 3362 } else { 3363 g_topology_unlock(); 3364 sx_xlock(&sc->sc_lock); 3365 sbuf_printf(sb, "%s<Type>", indent); 3366 switch (sc->sc_type) { 3367 case G_MIRROR_TYPE_AUTOMATIC: 3368 sbuf_printf(sb, "AUTOMATIC"); 3369 break; 3370 case G_MIRROR_TYPE_MANUAL: 3371 sbuf_printf(sb, "MANUAL"); 3372 break; 3373 default: 3374 sbuf_printf(sb, "UNKNOWN"); 3375 break; 3376 } 3377 sbuf_printf(sb, "</Type>\n"); 3378 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 3379 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid); 3380 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid); 3381 sbuf_printf(sb, "%s<Flags>", indent); 3382 if (sc->sc_flags == 0) 3383 sbuf_printf(sb, "NONE"); 3384 else { 3385 int first = 1; 3386 3387#define ADD_FLAG(flag, name) do { \ 3388 if ((sc->sc_flags & (flag)) != 0) { \ 3389 if (!first) \ 3390 sbuf_printf(sb, ", "); \ 3391 else \ 3392 first = 0; \ 3393 sbuf_printf(sb, name); \ 3394 } \ 3395} while (0) 3396 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC"); 3397 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC"); 3398#undef ADD_FLAG 3399 } 3400 sbuf_printf(sb, "</Flags>\n"); 3401 sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent, 3402 (u_int)sc->sc_slice); 3403 sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent, 3404 balance_name(sc->sc_balance)); 3405 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 3406 sc->sc_ndisks); 3407 sbuf_printf(sb, "%s<State>", indent); 3408 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 3409 sbuf_printf(sb, "%s", "STARTING"); 3410 else if (sc->sc_ndisks == 3411 g_mirror_ndisks(sc, 
G_MIRROR_DISK_STATE_ACTIVE)) 3412 sbuf_printf(sb, "%s", "COMPLETE"); 3413 else 3414 sbuf_printf(sb, "%s", "DEGRADED"); 3415 sbuf_printf(sb, "</State>\n"); 3416 sx_xunlock(&sc->sc_lock); 3417 g_topology_lock(); 3418 } 3419} 3420 3421static void 3422g_mirror_shutdown_post_sync(void *arg, int howto) 3423{ 3424 struct g_class *mp; 3425 struct g_geom *gp, *gp2; 3426 struct g_mirror_softc *sc; 3427 int error; 3428 3429 if (panicstr != NULL) 3430 return; 3431 3432 mp = arg; 3433 g_topology_lock(); 3434 g_mirror_shutdown = 1; 3435 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 3436 if ((sc = gp->softc) == NULL) 3437 continue; 3438 /* Skip synchronization geom. */ 3439 if (gp == sc->sc_sync.ds_geom) 3440 continue; 3441 g_topology_unlock(); 3442 sx_xlock(&sc->sc_lock); 3443 g_mirror_idle(sc, -1); 3444 g_cancel_event(sc); 3445 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED); 3446 if (error != 0) 3447 sx_xunlock(&sc->sc_lock); 3448 g_topology_lock(); 3449 } 3450 g_topology_unlock(); 3451} 3452 3453static void 3454g_mirror_init(struct g_class *mp) 3455{ 3456 3457 g_mirror_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync, 3458 g_mirror_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST); 3459 if (g_mirror_post_sync == NULL) 3460 G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event."); 3461} 3462 3463static void 3464g_mirror_fini(struct g_class *mp) 3465{ 3466 3467 if (g_mirror_post_sync != NULL) 3468 EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_mirror_post_sync); 3469} 3470 3471DECLARE_GEOM_CLASS(g_mirror_class, g_mirror); 3472
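/*
 * Illustration of the synchronization chunking set up in g_mirror_sync_start():
 * each of the g_mirror_syncreqs in-flight BIO_READ requests covers at most
 * MAXPHYS bytes, and the offset advances by the length of each request until
 * the end of the provider is reached.  A minimal userland sketch of that
 * progression, with sizes assumed purely for illustration:
 *
 *	off_t offset = 0, mediasize = 10 * 1024 * 1024;	// assumed 10 MB provider
 *	off_t maxphys = 128 * 1024;			// assumed MAXPHYS value
 *	while (offset < mediasize) {
 *		off_t len = mediasize - offset;
 *		if (len > maxphys)
 *			len = maxphys;
 *		// the kernel would issue a BIO_READ of len bytes at offset here
 *		offset += len;
 *	}
 */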