geom_vinum_plex.c revision 191849
/*-
 * Copyright (c) 2004, 2007 Lukas Ertl
 * Copyright (c) 2007, 2009 Ulf Lilleengen
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/geom/vinum/geom_vinum_plex.c 191849 2009-05-06 18:21:48Z lulf $");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <geom/geom.h>
#include <geom/vinum/geom_vinum_var.h>
#include <geom/vinum/geom_vinum_raid5.h>
#include <geom/vinum/geom_vinum.h>

static int	gv_check_parity(struct gv_plex *, struct bio *,
		    struct gv_raid5_packet *);
static int	gv_normal_parity(struct gv_plex *, struct bio *,
		    struct gv_raid5_packet *);
static void	gv_plex_flush(struct gv_plex *);
static int	gv_plex_offset(struct gv_plex *, off_t, off_t, off_t *, off_t *,
		    int *, int);
static int	gv_plex_normal_request(struct gv_plex *, struct bio *, off_t,
		    off_t, caddr_t);

void
gv_plex_start(struct gv_plex *p, struct bio *bp)
{
	struct bio *cbp;
	struct gv_sd *s;
	struct gv_raid5_packet *wp;
	caddr_t addr;
	off_t bcount, boff, len;

	bcount = bp->bio_length;
	addr = bp->bio_data;
	boff = bp->bio_offset;

	/* Walk over the whole length of the request; we might split it up. */
	while (bcount > 0) {
		wp = NULL;

		/*
		 * RAID5 plexes need special treatment, as a single request
		 * might involve several read/write sub-requests.
		 */
		if (p->org == GV_PLEX_RAID5) {
			wp = gv_raid5_start(p, bp, addr, boff, bcount);
			if (wp == NULL)
				return;

			len = wp->length;

			if (TAILQ_EMPTY(&wp->bits))
				g_free(wp);
			else if (wp->lockbase != -1)
				TAILQ_INSERT_TAIL(&p->packets, wp, list);

		/*
		 * Requests to concatenated and striped plexes go straight
		 * through.
		 */
		} else {
			len = gv_plex_normal_request(p, bp, boff, bcount, addr);
		}
		if (len < 0)
			return;

		bcount -= len;
		addr += len;
		boff += len;
	}

	/*
	 * Fire off all sub-requests.  We get the correct consumer (== drive)
	 * to send each request to via the subdisk that was stored in
	 * cbp->bio_caller1.
	 */
	cbp = bioq_takefirst(p->bqueue);
	while (cbp != NULL) {
		/*
		 * RAID5 sub-requests need to come in correct order, otherwise
		 * we trip over the parity, as it might be overwritten by
		 * another sub-request.  We abuse cbp->bio_caller2 to mark
		 * potential overlap situations.
		 */
		if (cbp->bio_caller2 != NULL && gv_stripe_active(p, cbp)) {
			/* Park the bio on the waiting queue. */
			cbp->bio_cflags |= GV_BIO_ONHOLD;
			bioq_disksort(p->wqueue, cbp);
		} else {
			s = cbp->bio_caller1;
			g_io_request(cbp, s->drive_sc->consumer);
		}
		cbp = bioq_takefirst(p->bqueue);
	}
}
static int
gv_plex_offset(struct gv_plex *p, off_t boff, off_t bcount, off_t *real_off,
    off_t *real_len, int *sdno, int growing)
{
	struct gv_sd *s;
	int i, sdcount;
	off_t len_left, stripeend, stripeno, stripestart;

	switch (p->org) {
	case GV_PLEX_CONCAT:
		/*
		 * Find the subdisk where this request starts.  The subdisks
		 * in this list must be ordered by plex_offset.
		 */
		i = 0;
		LIST_FOREACH(s, &p->subdisks, in_plex) {
			if (s->plex_offset <= boff &&
			    s->plex_offset + s->size > boff) {
				*sdno = i;
				break;
			}
			i++;
		}
		if (s == NULL || s->drive_sc == NULL)
			return (GV_ERR_NOTFOUND);

		/* Calculate corresponding offsets on disk. */
		*real_off = boff - s->plex_offset;
		len_left = s->size - (*real_off);
		KASSERT(len_left >= 0, ("gv_plex_offset: len_left < 0"));
		*real_len = (bcount > len_left) ? len_left : bcount;
		break;

	case GV_PLEX_STRIPED:
		/* The number of the stripe where the request starts. */
		stripeno = boff / p->stripesize;
		KASSERT(stripeno >= 0, ("gv_plex_offset: stripeno < 0"));

		/* Take growing subdisks into account when calculating. */
		sdcount = gv_sdcount(p, (boff >= p->synced));

		if (!(boff + bcount <= p->synced) &&
		    (p->flags & GV_PLEX_GROWING) &&
		    !growing)
			return (GV_ERR_ISBUSY);
		*sdno = stripeno % sdcount;

		KASSERT(*sdno >= 0, ("gv_plex_offset: sdno < 0"));
		stripestart = (stripeno / sdcount) * p->stripesize;
		KASSERT(stripestart >= 0, ("gv_plex_offset: stripestart < 0"));
		stripeend = stripestart + p->stripesize;
		*real_off = boff - (stripeno * p->stripesize) + stripestart;
		len_left = stripeend - *real_off;
		KASSERT(len_left >= 0, ("gv_plex_offset: len_left < 0"));

		*real_len = (bcount <= len_left) ? bcount : len_left;
		break;

	default:
		return (GV_ERR_PLEXORG);
	}
	return (0);
}
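
/*
 * Worked example of the striped mapping above, with illustrative values
 * (64 kB stripesize, 3 subdisks): a plex offset of 200 kB lies in stripe
 * number 200k / 64k = 3, which is placed on subdisk 3 % 3 = 0.  That
 * stripe starts (3 / 3) * 64k = 64k into the subdisk, so real_off =
 * 200k - 3 * 64k + 64k = 72k, and len_left = (64k + 64k) - 72k = 56k;
 * a longer request is split at that stripe boundary.
 */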
/*
 * Prepare a normal plex request.
 */
static int
gv_plex_normal_request(struct gv_plex *p, struct bio *bp, off_t boff,
    off_t bcount, caddr_t addr)
{
	struct gv_sd *s;
	struct bio *cbp;
	off_t real_len, real_off;
	int i, err, sdno;

	s = NULL;
	sdno = -1;
	real_len = real_off = 0;

	err = ENXIO;

	if (p == NULL || LIST_EMPTY(&p->subdisks))
		goto bad;

	err = gv_plex_offset(p, boff, bcount, &real_off,
	    &real_len, &sdno, (bp->bio_pflags & GV_BIO_SYNCREQ));
	/* If the request was blocked, put it into wait. */
	if (err == GV_ERR_ISBUSY) {
		bioq_disksort(p->rqueue, bp);
		return (-1);	/* "Fail", and delay request. */
	}
	if (err) {
		err = ENXIO;
		goto bad;
	}
	err = ENXIO;

	/* Find the right subdisk. */
	i = 0;
	LIST_FOREACH(s, &p->subdisks, in_plex) {
		if (i == sdno)
			break;
		i++;
	}

	/* Subdisk not found. */
	if (s == NULL || s->drive_sc == NULL)
		goto bad;

	/* Now check if we can handle the request on this subdisk. */
	switch (s->state) {
	case GV_SD_UP:
		/* If the subdisk is up, just continue. */
		break;
	case GV_SD_DOWN:
		if (bp->bio_cflags & GV_BIO_INTERNAL)
			G_VINUM_DEBUG(0, "subdisk must be in the stale state in"
			    " order to perform administrative requests");
		goto bad;
	case GV_SD_STALE:
		if (!(bp->bio_cflags & GV_BIO_SYNCREQ)) {
			G_VINUM_DEBUG(0, "subdisk stale, unable to perform "
			    "regular requests");
			goto bad;
		}

		G_VINUM_DEBUG(1, "sd %s is initializing", s->name);
		gv_set_sd_state(s, GV_SD_INITIALIZING, GV_SETSTATE_FORCE);
		break;
	case GV_SD_INITIALIZING:
		if (bp->bio_cmd == BIO_READ)
			goto bad;
		break;
	default:
		/* All other subdisk states mean it's not accessible. */
		goto bad;
	}

	/* Clone the bio and adjust the offsets and sizes. */
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		err = ENOMEM;
		goto bad;
	}
	cbp->bio_offset = real_off + s->drive_offset;
	cbp->bio_length = real_len;
	cbp->bio_data = addr;
	cbp->bio_done = gv_done;
	cbp->bio_caller1 = s;
	if ((bp->bio_cflags & GV_BIO_SYNCREQ))
		cbp->bio_cflags |= GV_BIO_SYNCREQ;

	/* Store the sub-requests now and let others issue them. */
	bioq_insert_tail(p->bqueue, cbp);
	return (real_len);
bad:
	G_VINUM_LOGREQ(0, bp, "plex request failed.");
	/* Building the sub-request failed.  If internal BIO, do not deliver. */
	if (bp->bio_cflags & GV_BIO_INTERNAL) {
		if (bp->bio_cflags & GV_BIO_MALLOC)
			g_free(bp->bio_data);
		g_destroy_bio(bp);
		p->flags &= ~(GV_PLEX_SYNCING | GV_PLEX_REBUILDING |
		    GV_PLEX_GROWING);
		return (-1);
	}
	g_io_deliver(bp, err);
	return (-1);
}

/*
 * Handle a completed request to a striped or concatenated plex.
 */
void
gv_plex_normal_done(struct gv_plex *p, struct bio *bp)
{
	struct bio *pbp;

	pbp = bp->bio_parent;
	if (pbp->bio_error == 0)
		pbp->bio_error = bp->bio_error;
	g_destroy_bio(bp);
	pbp->bio_inbed++;
	if (pbp->bio_children == pbp->bio_inbed) {
		/*
		 * Just set it to length since multiple plexes will
		 * screw things up.
		 */
		pbp->bio_completed = pbp->bio_length;
		if (pbp->bio_cflags & GV_BIO_SYNCREQ)
			gv_sync_complete(p, pbp);
		else if (pbp->bio_pflags & GV_BIO_SYNCREQ)
			gv_grow_complete(p, pbp);
		else
			g_io_deliver(pbp, pbp->bio_error);
	}
}
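
/*
 * The RAID-5 completion path below maintains parity with XOR arithmetic:
 * on a degraded read, the missing block is reconstructed by XORing the
 * blocks read from the surviving subdisks into wp->data; on a write, the
 * new parity is accumulated into the parity bio's data the same way.  As
 * an illustrative example with two data blocks and parity P = D0 ^ D1,
 * a lost D1 is recovered as D0 ^ P.
 */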
/*
 * Handle a completed request to a RAID-5 plex.
 */
void
gv_plex_raid5_done(struct gv_plex *p, struct bio *bp)
{
	struct gv_softc *sc;
	struct bio *cbp, *pbp;
	struct gv_bioq *bq, *bq2;
	struct gv_raid5_packet *wp;
	off_t completed;
	int i;

	completed = 0;
	sc = p->vinumconf;
	wp = bp->bio_caller2;

	switch (bp->bio_parent->bio_cmd) {
	case BIO_READ:
		if (wp == NULL) {
			completed = bp->bio_completed;
			break;
		}

		TAILQ_FOREACH_SAFE(bq, &wp->bits, queue, bq2) {
			if (bq->bp != bp)
				continue;
			TAILQ_REMOVE(&wp->bits, bq, queue);
			g_free(bq);
			for (i = 0; i < wp->length; i++)
				wp->data[i] ^= bp->bio_data[i];
			break;
		}
		if (TAILQ_EMPTY(&wp->bits)) {
			completed = wp->length;
			if (wp->lockbase != -1) {
				TAILQ_REMOVE(&p->packets, wp, list);
				/* Bring the waiting bios back into the game. */
				pbp = bioq_takefirst(p->wqueue);
				while (pbp != NULL) {
					gv_post_bio(sc, pbp);
					pbp = bioq_takefirst(p->wqueue);
				}
			}
			g_free(wp);
		}

		break;

	case BIO_WRITE:
		/* XXX can this ever happen? */
		if (wp == NULL) {
			completed = bp->bio_completed;
			break;
		}

		/* Check if we need to handle parity data. */
		TAILQ_FOREACH_SAFE(bq, &wp->bits, queue, bq2) {
			if (bq->bp != bp)
				continue;
			TAILQ_REMOVE(&wp->bits, bq, queue);
			g_free(bq);
			cbp = wp->parity;
			if (cbp != NULL) {
				for (i = 0; i < wp->length; i++)
					cbp->bio_data[i] ^= bp->bio_data[i];
			}
			break;
		}

		/* Handle parity data. */
		if (TAILQ_EMPTY(&wp->bits)) {
			if (bp->bio_parent->bio_cflags & GV_BIO_CHECK)
				i = gv_check_parity(p, bp, wp);
			else
				i = gv_normal_parity(p, bp, wp);

			/* All of our sub-requests have finished. */
			if (i) {
				completed = wp->length;
				TAILQ_REMOVE(&p->packets, wp, list);
				/* Bring the waiting bios back into the game. */
				pbp = bioq_takefirst(p->wqueue);
				while (pbp != NULL) {
					gv_post_bio(sc, pbp);
					pbp = bioq_takefirst(p->wqueue);
				}
				g_free(wp);
			}
		}

		break;
	}

	pbp = bp->bio_parent;
	if (pbp->bio_error == 0)
		pbp->bio_error = bp->bio_error;
	pbp->bio_completed += completed;

	/* When the original request is finished, we deliver it. */
	pbp->bio_inbed++;
	if (pbp->bio_inbed == pbp->bio_children) {
		/* Hand it over for checking or delivery. */
		if (pbp->bio_cmd == BIO_WRITE &&
		    (pbp->bio_cflags & GV_BIO_CHECK)) {
			gv_parity_complete(p, pbp);
		} else if (pbp->bio_cmd == BIO_WRITE &&
		    (pbp->bio_cflags & GV_BIO_REBUILD)) {
			gv_rebuild_complete(p, pbp);
		} else if (pbp->bio_cflags & GV_BIO_INIT) {
			gv_init_complete(p, pbp);
		} else if (pbp->bio_cflags & GV_BIO_SYNCREQ) {
			gv_sync_complete(p, pbp);
		} else if (pbp->bio_pflags & GV_BIO_SYNCREQ) {
			gv_grow_complete(p, pbp);
		} else {
			g_io_deliver(pbp, pbp->bio_error);
		}
	}

	/* Clean up what we allocated. */
	if (bp->bio_cflags & GV_BIO_MALLOC)
		g_free(bp->bio_data);
	g_destroy_bio(bp);
}

static int
gv_check_parity(struct gv_plex *p, struct bio *bp, struct gv_raid5_packet *wp)
{
	struct bio *pbp;
	struct gv_sd *s;
	int err, finished, i;

	err = 0;
	finished = 1;

	if (wp->waiting != NULL) {
		pbp = wp->waiting;
		wp->waiting = NULL;
		s = pbp->bio_caller1;
		g_io_request(pbp, s->drive_sc->consumer);
		finished = 0;

	} else if (wp->parity != NULL) {
		pbp = wp->parity;
		wp->parity = NULL;

		/* Check if the parity is correct. */
		for (i = 0; i < wp->length; i++) {
			if (bp->bio_data[i] != pbp->bio_data[i]) {
				err = 1;
				break;
			}
		}

		/* The parity is not correct... */
		if (err) {
			bp->bio_parent->bio_error = EAGAIN;

			/* ... but we rebuild it. */
			if (bp->bio_parent->bio_cflags & GV_BIO_PARITY) {
				s = pbp->bio_caller1;
				g_io_request(pbp, s->drive_sc->consumer);
				finished = 0;
			}
		}

		/*
		 * Clean up the BIO we would have used for rebuilding the
		 * parity.
		 */
		if (finished) {
			bp->bio_parent->bio_inbed++;
			g_destroy_bio(pbp);
		}
	}

	return (finished);
}
493 */ 494 if (finished) { 495 bp->bio_parent->bio_inbed++; 496 g_destroy_bio(pbp); 497 } 498 499 } 500 501 return (finished); 502} 503 504static int 505gv_normal_parity(struct gv_plex *p, struct bio *bp, struct gv_raid5_packet *wp) 506{ 507 struct bio *cbp, *pbp; 508 struct gv_sd *s; 509 int finished, i; 510 511 finished = 1; 512 513 if (wp->waiting != NULL) { 514 pbp = wp->waiting; 515 wp->waiting = NULL; 516 cbp = wp->parity; 517 for (i = 0; i < wp->length; i++) 518 cbp->bio_data[i] ^= pbp->bio_data[i]; 519 s = pbp->bio_caller1; 520 g_io_request(pbp, s->drive_sc->consumer); 521 finished = 0; 522 523 } else if (wp->parity != NULL) { 524 cbp = wp->parity; 525 wp->parity = NULL; 526 s = cbp->bio_caller1; 527 g_io_request(cbp, s->drive_sc->consumer); 528 finished = 0; 529 } 530 531 return (finished); 532} 533 534/* Flush the queue with delayed requests. */ 535static void 536gv_plex_flush(struct gv_plex *p) 537{ 538 struct gv_softc *sc; 539 struct bio *bp; 540 541 sc = p->vinumconf; 542 bp = bioq_takefirst(p->rqueue); 543 while (bp != NULL) { 544 gv_plex_start(p, bp); 545 bp = bioq_takefirst(p->rqueue); 546 } 547} 548 549int 550gv_sync_request(struct gv_plex *from, struct gv_plex *to, off_t offset, 551 off_t length, int type, caddr_t data) 552{ 553 struct gv_softc *sc; 554 struct bio *bp; 555 556 KASSERT(from != NULL, ("NULL from")); 557 KASSERT(to != NULL, ("NULL to")); 558 sc = from->vinumconf; 559 KASSERT(sc != NULL, ("NULL sc")); 560 561 bp = g_new_bio(); 562 if (bp == NULL) { 563 G_VINUM_DEBUG(0, "sync from '%s' failed at offset " 564 " %jd; out of memory", from->name, offset); 565 return (ENOMEM); 566 } 567 bp->bio_length = length; 568 bp->bio_done = gv_done; 569 bp->bio_cflags |= GV_BIO_SYNCREQ; 570 bp->bio_offset = offset; 571 bp->bio_caller1 = from; 572 bp->bio_caller2 = to; 573 bp->bio_cmd = type; 574 if (data == NULL) 575 data = g_malloc(length, M_WAITOK); 576 bp->bio_cflags |= GV_BIO_MALLOC; /* Free on the next run. */ 577 bp->bio_data = data; 578 579 /* Send down next. */ 580 gv_post_bio(sc, bp); 581 //gv_plex_start(from, bp); 582 return (0); 583} 584 585/* 586 * Handle a finished plex sync bio. 587 */ 588int 589gv_sync_complete(struct gv_plex *to, struct bio *bp) 590{ 591 struct gv_plex *from, *p; 592 struct gv_sd *s; 593 struct gv_volume *v; 594 struct gv_softc *sc; 595 off_t offset; 596 int err; 597 598 g_topology_assert_not(); 599 600 err = 0; 601 KASSERT(to != NULL, ("NULL to")); 602 KASSERT(bp != NULL, ("NULL bp")); 603 from = bp->bio_caller2; 604 KASSERT(from != NULL, ("NULL from")); 605 v = to->vol_sc; 606 KASSERT(v != NULL, ("NULL v")); 607 sc = v->vinumconf; 608 KASSERT(sc != NULL, ("NULL sc")); 609 610 /* If it was a read, write it. */ 611 if (bp->bio_cmd == BIO_READ) { 612 err = gv_sync_request(from, to, bp->bio_offset, bp->bio_length, 613 BIO_WRITE, bp->bio_data); 614 /* If it was a write, read the next one. */ 615 } else if (bp->bio_cmd == BIO_WRITE) { 616 if (bp->bio_cflags & GV_BIO_MALLOC) 617 g_free(bp->bio_data); 618 to->synced += bp->bio_length; 619 /* If we're finished, clean up. */ 620 if (bp->bio_offset + bp->bio_length >= from->size) { 621 G_VINUM_DEBUG(1, "syncing of %s from %s completed", 622 to->name, from->name); 623 /* Update our state. 
/*
 * Handle a finished plex sync bio.
 */
int
gv_sync_complete(struct gv_plex *to, struct bio *bp)
{
	struct gv_plex *from, *p;
	struct gv_sd *s;
	struct gv_volume *v;
	struct gv_softc *sc;
	off_t offset;
	int err;

	g_topology_assert_not();

	err = 0;
	KASSERT(to != NULL, ("NULL to"));
	KASSERT(bp != NULL, ("NULL bp"));
	from = bp->bio_caller2;
	KASSERT(from != NULL, ("NULL from"));
	v = to->vol_sc;
	KASSERT(v != NULL, ("NULL v"));
	sc = v->vinumconf;
	KASSERT(sc != NULL, ("NULL sc"));

	/* If it was a read, write it. */
	if (bp->bio_cmd == BIO_READ) {
		err = gv_sync_request(from, to, bp->bio_offset, bp->bio_length,
		    BIO_WRITE, bp->bio_data);
	/* If it was a write, read the next one. */
	} else if (bp->bio_cmd == BIO_WRITE) {
		if (bp->bio_cflags & GV_BIO_MALLOC)
			g_free(bp->bio_data);
		to->synced += bp->bio_length;
		/* If we're finished, clean up. */
		if (bp->bio_offset + bp->bio_length >= from->size) {
			G_VINUM_DEBUG(1, "syncing of %s from %s completed",
			    to->name, from->name);
			/* Update our state. */
			LIST_FOREACH(s, &to->subdisks, in_plex)
				gv_set_sd_state(s, GV_SD_UP, 0);
			gv_update_plex_state(to);
			to->flags &= ~GV_PLEX_SYNCING;
			to->synced = 0;
			gv_post_event(sc, GV_EVENT_SAVE_CONFIG, sc, NULL, 0, 0);
		} else {
			offset = bp->bio_offset + bp->bio_length;
			err = gv_sync_request(from, to, offset,
			    MIN(bp->bio_length, from->size - offset),
			    BIO_READ, NULL);
		}
	}
	g_destroy_bio(bp);
	/* Clean up if there was an error. */
	if (err) {
		to->flags &= ~GV_PLEX_SYNCING;
		G_VINUM_DEBUG(0, "error syncing plexes: error code %d", err);
	}

	/* Check if all plexes are synced, and lower refcounts. */
	g_topology_lock();
	LIST_FOREACH(p, &v->plexes, in_volume) {
		if (p->flags & GV_PLEX_SYNCING) {
			g_topology_unlock();
			return (-1);
		}
	}
	/* If we came here, all plexes are synced, and we're free. */
	gv_access(v->provider, -1, -1, 0);
	g_topology_unlock();
	G_VINUM_DEBUG(1, "plex sync completed");
	gv_volume_flush(v);
	return (0);
}

/*
 * Create a new bio struct for the next grow request.
 */
int
gv_grow_request(struct gv_plex *p, off_t offset, off_t length, int type,
    caddr_t data)
{
	struct gv_softc *sc;
	struct bio *bp;

	KASSERT(p != NULL, ("gv_grow_request: NULL p"));
	sc = p->vinumconf;
	KASSERT(sc != NULL, ("gv_grow_request: NULL sc"));

	bp = g_new_bio();
	if (bp == NULL) {
		G_VINUM_DEBUG(0, "grow of %s failed creating bio: "
		    "out of memory", p->name);
		return (ENOMEM);
	}

	bp->bio_cmd = type;
	bp->bio_done = gv_done;
	bp->bio_error = 0;
	bp->bio_caller1 = p;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_pflags |= GV_BIO_SYNCREQ;	/* XXX: misuse of pflags AND syncreq. */
	if (data == NULL)
		data = g_malloc(length, M_WAITOK);
	bp->bio_cflags |= GV_BIO_MALLOC;
	bp->bio_data = data;

	gv_post_bio(sc, bp);
	//gv_plex_start(p, bp);
	return (0);
}
/*
 * Finish handling of a bio to a growing plex.
 */
void
gv_grow_complete(struct gv_plex *p, struct bio *bp)
{
	struct gv_softc *sc;
	struct gv_sd *s;
	struct gv_volume *v;
	off_t origsize, offset;
	int sdcount, err;

	v = p->vol_sc;
	KASSERT(v != NULL, ("gv_grow_complete: NULL v"));
	sc = v->vinumconf;
	KASSERT(sc != NULL, ("gv_grow_complete: NULL sc"));
	err = 0;

	/* If it was a read, write it. */
	if (bp->bio_cmd == BIO_READ) {
		p->synced += bp->bio_length;
		err = gv_grow_request(p, bp->bio_offset, bp->bio_length,
		    BIO_WRITE, bp->bio_data);
	/* If it was a write, read the next one. */
	} else if (bp->bio_cmd == BIO_WRITE) {
		if (bp->bio_cflags & GV_BIO_MALLOC)
			g_free(bp->bio_data);

		/* Find the real size of the plex. */
		sdcount = gv_sdcount(p, 1);
		s = LIST_FIRST(&p->subdisks);
		KASSERT(s != NULL, ("NULL s"));
		origsize = (s->size * (sdcount - 1));
		if (bp->bio_offset + bp->bio_length >= origsize) {
			G_VINUM_DEBUG(1, "growing of %s completed", p->name);
			p->flags &= ~GV_PLEX_GROWING;
			LIST_FOREACH(s, &p->subdisks, in_plex) {
				s->flags &= ~GV_SD_GROW;
				gv_set_sd_state(s, GV_SD_UP, 0);
			}
			p->size = gv_plex_size(p);
			gv_update_vol_size(v, gv_vol_size(v));
			gv_set_plex_state(p, GV_PLEX_UP, 0);
			g_topology_lock();
			gv_access(v->provider, -1, -1, 0);
			g_topology_unlock();
			p->synced = 0;
			gv_post_event(sc, GV_EVENT_SAVE_CONFIG, sc, NULL, 0, 0);
			/* Issue delayed requests. */
			gv_plex_flush(p);
		} else {
			offset = bp->bio_offset + bp->bio_length;
			err = gv_grow_request(p, offset,
			    MIN(bp->bio_length, origsize - offset),
			    BIO_READ, NULL);
		}
	}
	g_destroy_bio(bp);

	if (err) {
		p->flags &= ~GV_PLEX_GROWING;
		G_VINUM_DEBUG(0, "error growing plex: error code %d", err);
	}
}

/*
 * Create an initialization BIO and send it off to the consumer.  Assume that
 * we're given initialization data as parameter.
 */
void
gv_init_request(struct gv_sd *s, off_t start, caddr_t data, off_t length)
{
	struct gv_drive *d;
	struct g_consumer *cp;
	struct bio *bp, *cbp;

	KASSERT(s != NULL, ("gv_init_request: NULL s"));
	d = s->drive_sc;
	KASSERT(d != NULL, ("gv_init_request: NULL d"));
	cp = d->consumer;
	KASSERT(cp != NULL, ("gv_init_request: NULL cp"));

	bp = g_new_bio();
	if (bp == NULL) {
		G_VINUM_DEBUG(0, "subdisk '%s' init: write failed at offset %jd"
		    " (drive offset %jd); out of memory", s->name,
		    (intmax_t)s->initialized, (intmax_t)start);
		return; /* XXX: Error codes. */
	}
	bp->bio_cmd = BIO_WRITE;
	bp->bio_data = data;
	bp->bio_done = gv_done;
	bp->bio_error = 0;
	bp->bio_length = length;
	bp->bio_cflags |= GV_BIO_INIT;
	bp->bio_offset = start;
	bp->bio_caller1 = s;

	/* Then, of course, we have to clone it. */
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		G_VINUM_DEBUG(0, "subdisk '%s' init: write failed at offset %jd"
		    " (drive offset %jd); out of memory", s->name,
		    (intmax_t)s->initialized, (intmax_t)start);
		return; /* XXX: Error codes. */
	}
	cbp->bio_done = gv_done;
	cbp->bio_caller1 = s;
	/* Send it off to the consumer. */
	g_io_request(cbp, cp);
}

/*
 * Handle a finished initialization BIO.
 */
void
gv_init_complete(struct gv_plex *p, struct bio *bp)
{
	struct gv_softc *sc;
	struct gv_drive *d;
	struct g_consumer *cp;
	struct gv_sd *s;
	off_t start, length;
	caddr_t data;
	int error;

	s = bp->bio_caller1;
	start = bp->bio_offset;
	length = bp->bio_length;
	error = bp->bio_error;
	data = bp->bio_data;

	KASSERT(s != NULL, ("gv_init_complete: NULL s"));
	d = s->drive_sc;
	KASSERT(d != NULL, ("gv_init_complete: NULL d"));
	cp = d->consumer;
	KASSERT(cp != NULL, ("gv_init_complete: NULL cp"));
	sc = p->vinumconf;
	KASSERT(sc != NULL, ("gv_init_complete: NULL sc"));

	g_destroy_bio(bp);

	/*
	 * First we need to find out if it was okay, and abort if it's not.
	 * Then we need to free previous buffers, find out the correct subdisk,
	 * as well as getting the correct starting point and length of the BIO.
	 */
	if (start >= s->drive_offset + s->size) {
		/* Free the data we initialized. */
		if (data != NULL)
			g_free(data);
		g_topology_assert_not();
		g_topology_lock();
		g_access(cp, 0, -1, 0);
		g_topology_unlock();
		if (error) {
			gv_set_sd_state(s, GV_SD_STALE, GV_SETSTATE_FORCE |
			    GV_SETSTATE_CONFIG);
		} else {
			gv_set_sd_state(s, GV_SD_UP, GV_SETSTATE_CONFIG);
			s->initialized = 0;
			gv_post_event(sc, GV_EVENT_SAVE_CONFIG, sc, NULL, 0, 0);
			G_VINUM_DEBUG(1, "subdisk '%s' init: finished "
			    "successfully", s->name);
		}
		return;
	}
	s->initialized += length;
	start += length;
	gv_init_request(s, start, data, length);
}
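
/*
 * The two functions above form a simple write loop: every completed
 * write advances s->initialized by one buffer length and immediately
 * issues the next write, until the start offset runs past the end of
 * the subdisk on its drive.
 */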
/*
 * Create a new bio struct for the next parity rebuild.  Used both by internal
 * rebuild of degraded plexes as well as user initiated rebuilds/checks.
 */
void
gv_parity_request(struct gv_plex *p, int flags, off_t offset)
{
	struct gv_softc *sc;
	struct bio *bp;

	KASSERT(p != NULL, ("gv_parity_request: NULL p"));
	sc = p->vinumconf;
	KASSERT(sc != NULL, ("gv_parity_request: NULL sc"));

	bp = g_new_bio();
	if (bp == NULL) {
		G_VINUM_DEBUG(0, "rebuild of %s failed creating bio: "
		    "out of memory", p->name);
		return;
	}

	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = gv_done;
	bp->bio_error = 0;
	bp->bio_length = p->stripesize;
	bp->bio_caller1 = p;

	/*
	 * Check if it's a rebuild of a degraded plex or a user request of
	 * parity rebuild.
	 */
	if (flags & GV_BIO_REBUILD)
		bp->bio_data = g_malloc(GV_DFLT_SYNCSIZE, M_WAITOK);
	else if (flags & GV_BIO_CHECK)
		bp->bio_data = g_malloc(p->stripesize, M_WAITOK | M_ZERO);
	else {
		G_VINUM_DEBUG(0, "invalid flags given in rebuild");
		g_destroy_bio(bp);
		return;
	}

	bp->bio_cflags = flags;
	bp->bio_cflags |= GV_BIO_MALLOC;

	/* We still have more parity to build. */
	bp->bio_offset = offset;
	gv_post_bio(sc, bp);
	//gv_plex_start(p, bp); /* Send it down to the plex. */
}

/*
 * Handle a finished parity write.
 */
void
gv_parity_complete(struct gv_plex *p, struct bio *bp)
{
	struct gv_softc *sc;
	int error, flags;

	error = bp->bio_error;
	flags = bp->bio_cflags;
	flags &= ~GV_BIO_MALLOC;

	sc = p->vinumconf;
	KASSERT(sc != NULL, ("gv_parity_complete: NULL sc"));

	/* Clean up what we allocated. */
	if (bp->bio_cflags & GV_BIO_MALLOC)
		g_free(bp->bio_data);
	g_destroy_bio(bp);

	if (error == EAGAIN) {
		G_VINUM_DEBUG(0, "parity incorrect at offset 0x%jx",
		    (intmax_t)p->synced);
	}

	/* Any error is fatal, except EAGAIN when we're rebuilding. */
	if (error && !(error == EAGAIN && (flags & GV_BIO_PARITY))) {
		/* Make sure we don't have the lock. */
		g_topology_assert_not();
		g_topology_lock();
		gv_access(p->vol_sc->provider, -1, -1, 0);
		g_topology_unlock();
		G_VINUM_DEBUG(0, "parity check on %s failed at 0x%jx "
		    "errno %d", p->name, (intmax_t)p->synced, error);
		return;
	} else {
		p->synced += p->stripesize;
	}

	if (p->synced >= p->size) {
		/* Make sure we don't have the lock. */
		g_topology_assert_not();
		g_topology_lock();
		gv_access(p->vol_sc->provider, -1, -1, 0);
		g_topology_unlock();
		/* We're finished. */
		G_VINUM_DEBUG(1, "parity operation on %s finished", p->name);
		p->synced = 0;
		gv_post_event(sc, GV_EVENT_SAVE_CONFIG, sc, NULL, 0, 0);
		return;
	}

	/* Send down the next request; it will itself determine whether more are needed. */
	gv_parity_request(p, flags, p->synced);
}
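
/*
 * Unlike a parity check, which advances by a single stripe per pass, a
 * rebuild pass below steps the offset by stripesize * (sdcount - 1),
 * i.e. one full row of data stripes, since in RAID-5 one subdisk per
 * row holds parity and the rebuild covers the data capacity of the
 * whole row.
 */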
/*
 * Handle a finished plex rebuild bio.
 */
void
gv_rebuild_complete(struct gv_plex *p, struct bio *bp)
{
	struct gv_softc *sc;
	struct gv_sd *s;
	int error, flags;
	off_t offset;

	error = bp->bio_error;
	flags = bp->bio_cflags;
	offset = bp->bio_offset;
	flags &= ~GV_BIO_MALLOC;
	sc = p->vinumconf;
	KASSERT(sc != NULL, ("gv_rebuild_complete: NULL sc"));

	/* Clean up what we allocated. */
	if (bp->bio_cflags & GV_BIO_MALLOC)
		g_free(bp->bio_data);
	g_destroy_bio(bp);

	if (error) {
		g_topology_assert_not();
		g_topology_lock();
		gv_access(p->vol_sc->provider, -1, -1, 0);
		g_topology_unlock();

		G_VINUM_DEBUG(0, "rebuild of %s failed at offset %jd errno: %d",
		    p->name, (intmax_t)offset, error);
		p->flags &= ~GV_PLEX_REBUILDING;
		p->synced = 0;
		gv_plex_flush(p); /* Flush out remaining rebuild BIOs. */
		return;
	}

	offset += (p->stripesize * (gv_sdcount(p, 1) - 1));
	if (offset >= p->size) {
		/* We're finished. */
		g_topology_assert_not();
		g_topology_lock();
		gv_access(p->vol_sc->provider, -1, -1, 0);
		g_topology_unlock();

		G_VINUM_DEBUG(1, "rebuild of %s finished", p->name);
		gv_save_config(p->vinumconf);
		p->flags &= ~GV_PLEX_REBUILDING;
		p->synced = 0;
		/* Try to up all subdisks. */
		LIST_FOREACH(s, &p->subdisks, in_plex)
			gv_update_sd_state(s);
		gv_post_event(sc, GV_EVENT_SAVE_CONFIG, sc, NULL, 0, 0);
		gv_plex_flush(p); /* Flush out remaining rebuild BIOs. */
		return;
	}

	/* Send down the next request; it will itself determine whether more are needed. */
	gv_parity_request(p, flags, offset);
}