geom_ccd.c revision 115953
1/* 2 * Copyright (c) 2003 Poul-Henning Kamp. 3 * Copyright (c) 1995 Jason R. Thorpe. 4 * Copyright (c) 1990, 1993 5 * The Regents of the University of California. All rights reserved. 6 * All rights reserved. 7 * Copyright (c) 1988 University of Utah. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * the Systems Programming Group of the University of Utah Computer 11 * Science Department. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. All advertising materials mentioning features or use of this software 22 * must display the following acknowledgement: 23 * This product includes software developed for the NetBSD Project 24 * by Jason R. Thorpe. 25 * 4. The names of the authors may not be used to endorse or promote products 26 * derived from this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 29 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 30 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 31 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 33 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 34 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 35 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 36 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * SUCH DAMAGE. 39 * 40 * Dynamic configuration and disklabel support by: 41 * Jason R. Thorpe <thorpej@nas.nasa.gov> 42 * Numerical Aerodynamic Simulation Facility 43 * Mail Stop 258-6 44 * NASA Ames Research Center 45 * Moffett Field, CA 94035 46 * 47 * from: Utah $Hdr: cd.c 1.6 90/11/28$ 48 * 49 * @(#)cd.c 8.2 (Berkeley) 11/16/93 50 * 51 * $NetBSD: ccd.c,v 1.22 1995/12/08 19:13:26 thorpej Exp $ 52 * 53 * $FreeBSD: head/sys/geom/geom_ccd.c 115953 2003-06-07 11:06:44Z phk $ 54 */ 55 56#include <sys/param.h> 57#include <sys/systm.h> 58#include <sys/kernel.h> 59#include <sys/module.h> 60#include <sys/bio.h> 61#include <sys/malloc.h> 62#include <geom/geom.h> 63 64/* 65 * Number of blocks to untouched in front of a component partition. 66 * This is to avoid violating its disklabel area when it starts at the 67 * beginning of the slice. 68 */ 69#if !defined(CCD_OFFSET) 70#define CCD_OFFSET 16 71#endif 72 73/* sc_flags */ 74#define CCDF_UNIFORM 0x02 /* use LCCD of sizes for uniform interleave */ 75#define CCDF_MIRROR 0x04 /* use mirroring */ 76 77/* Mask of user-settable ccd flags. */ 78#define CCDF_USERMASK (CCDF_UNIFORM|CCDF_MIRROR) 79 80/* 81 * Interleave description table. 82 * Computed at boot time to speed irregular-interleave lookups. 83 * The idea is that we interleave in "groups". First we interleave 84 * evenly over all component disks up to the size of the smallest 85 * component (the first group), then we interleave evenly over all 86 * remaining disks up to the size of the next-smallest (second group), 87 * and so on. 88 * 89 * Each table entry describes the interleave characteristics of one 90 * of these groups. For example if a concatenated disk consisted of 91 * three components of 5, 3, and 7 DEV_BSIZE blocks interleaved at 92 * DEV_BSIZE (1), the table would have three entries: 93 * 94 * ndisk startblk startoff dev 95 * 3 0 0 0, 1, 2 96 * 2 9 3 0, 2 97 * 1 13 5 2 98 * 0 - - - 99 * 100 * which says that the first nine blocks (0-8) are interleaved over 101 * 3 disks (0, 1, 2) starting at block offset 0 on any component disk, 102 * the next 4 blocks (9-12) are interleaved over 2 disks (0, 2) starting 103 * at component block 3, and the remaining blocks (13-14) are on disk 104 * 2 starting at offset 5. 105 */ 106struct ccdiinfo { 107 int ii_ndisk; /* # of disks range is interleaved over */ 108 daddr_t ii_startblk; /* starting scaled block # for range */ 109 daddr_t ii_startoff; /* starting component offset (block #) */ 110 int *ii_index; /* ordered list of components in range */ 111}; 112 113/* 114 * Component info table. 115 * Describes a single component of a concatenated disk. 116 */ 117struct ccdcinfo { 118 size_t ci_size; /* size */ 119 struct g_provider *ci_provider; /* provider */ 120 struct g_consumer *ci_consumer; /* consumer */ 121}; 122 123/* 124 * A concatenated disk is described by this structure. 125 */ 126 127struct ccd_s { 128 LIST_ENTRY(ccd_s) list; 129 130 int sc_unit; /* logical unit number */ 131 int sc_flags; /* flags */ 132 size_t sc_size; /* size of ccd */ 133 int sc_ileave; /* interleave */ 134 u_int sc_ndisks; /* number of components */ 135 struct ccdcinfo *sc_cinfo; /* component info */ 136 struct ccdiinfo *sc_itable; /* interleave table */ 137 u_int32_t sc_secsize; /* # bytes per sector */ 138 int sc_pick; /* side of mirror picked */ 139 daddr_t sc_blk[2]; /* mirror localization */ 140}; 141 142static g_start_t g_ccd_start; 143static void ccdiodone(struct bio *bp); 144static void ccdinterleave(struct ccd_s *); 145static int ccdinit(struct gctl_req *req, struct ccd_s *); 146static int ccdbuffer(struct bio **ret, struct ccd_s *, 147 struct bio *, daddr_t, caddr_t, long); 148 149static void 150g_ccd_orphan(struct g_consumer *cp) 151{ 152 /* 153 * XXX: We don't do anything here. It is not obvious 154 * XXX: what DTRT would be, so we do what the previous 155 * XXX: code did: ignore it and let the user cope. 156 */ 157} 158 159static int 160g_ccd_access(struct g_provider *pp, int dr, int dw, int de) 161{ 162 struct g_geom *gp; 163 struct g_consumer *cp1, *cp2; 164 int error; 165 166 de += dr; 167 de += dw; 168 169 gp = pp->geom; 170 error = ENXIO; 171 LIST_FOREACH(cp1, &gp->consumer, consumer) { 172 error = g_access_rel(cp1, dr, dw, de); 173 if (error) { 174 LIST_FOREACH(cp2, &gp->consumer, consumer) { 175 if (cp1 == cp2) 176 break; 177 g_access_rel(cp1, -dr, -dw, -de); 178 } 179 break; 180 } 181 } 182 return (error); 183} 184 185/* 186 * Free the softc and its substructures. 187 */ 188static void 189g_ccd_freesc(struct ccd_s *sc) 190{ 191 struct ccdiinfo *ii; 192 193 g_free(sc->sc_cinfo); 194 if (sc->sc_itable != NULL) { 195 for (ii = sc->sc_itable; ii->ii_ndisk > 0; ii++) 196 if (ii->ii_index != NULL) 197 g_free(ii->ii_index); 198 g_free(sc->sc_itable); 199 } 200 g_free(sc); 201} 202 203 204static int 205ccdinit(struct gctl_req *req, struct ccd_s *cs) 206{ 207 struct ccdcinfo *ci; 208 size_t size; 209 int ix; 210 size_t minsize; 211 int maxsecsize; 212 off_t mediasize; 213 u_int sectorsize; 214 215 cs->sc_size = 0; 216 217 maxsecsize = 0; 218 minsize = 0; 219 for (ix = 0; ix < cs->sc_ndisks; ix++) { 220 ci = &cs->sc_cinfo[ix]; 221 222 mediasize = ci->ci_provider->mediasize; 223 sectorsize = ci->ci_provider->sectorsize; 224 if (sectorsize > maxsecsize) 225 maxsecsize = sectorsize; 226 size = mediasize / DEV_BSIZE - CCD_OFFSET; 227 228 /* Truncate to interleave boundary */ 229 230 if (cs->sc_ileave > 1) 231 size -= size % cs->sc_ileave; 232 233 if (size == 0) { 234 gctl_error(req, "Component %s has effective size zero", 235 ci->ci_provider->name); 236 return(ENODEV); 237 } 238 239 if (minsize == 0 || size < minsize) 240 minsize = size; 241 ci->ci_size = size; 242 cs->sc_size += size; 243 } 244 245 /* 246 * Don't allow the interleave to be smaller than 247 * the biggest component sector. 248 */ 249 if ((cs->sc_ileave > 0) && 250 (cs->sc_ileave < (maxsecsize / DEV_BSIZE))) { 251 gctl_error(req, "Interleave to small for sector size"); 252 return(EINVAL); 253 } 254 255 /* 256 * If uniform interleave is desired set all sizes to that of 257 * the smallest component. This will guarentee that a single 258 * interleave table is generated. 259 * 260 * Lost space must be taken into account when calculating the 261 * overall size. Half the space is lost when CCDF_MIRROR is 262 * specified. 263 */ 264 if (cs->sc_flags & CCDF_UNIFORM) { 265 for (ix = 0; ix < cs->sc_ndisks; ix++) { 266 ci = &cs->sc_cinfo[ix]; 267 ci->ci_size = minsize; 268 } 269 cs->sc_size = cs->sc_ndisks * minsize; 270 } 271 272 if (cs->sc_flags & CCDF_MIRROR) { 273 /* 274 * Check to see if an even number of components 275 * have been specified. The interleave must also 276 * be non-zero in order for us to be able to 277 * guarentee the topology. 278 */ 279 if (cs->sc_ndisks % 2) { 280 gctl_error(req, 281 "Mirroring requires an even number of disks"); 282 return(EINVAL); 283 } 284 if (cs->sc_ileave == 0) { 285 gctl_error(req, 286 "An interleave must be specified when mirroring"); 287 return(EINVAL); 288 } 289 cs->sc_size = (cs->sc_ndisks/2) * minsize; 290 } 291 292 /* 293 * Construct the interleave table. 294 */ 295 ccdinterleave(cs); 296 297 /* 298 * Create pseudo-geometry based on 1MB cylinders. It's 299 * pretty close. 300 */ 301 cs->sc_secsize = maxsecsize; 302 303 return (0); 304} 305 306static void 307ccdinterleave(struct ccd_s *cs) 308{ 309 struct ccdcinfo *ci, *smallci; 310 struct ccdiinfo *ii; 311 daddr_t bn, lbn; 312 int ix; 313 u_long size; 314 315 316 /* 317 * Allocate an interleave table. The worst case occurs when each 318 * of N disks is of a different size, resulting in N interleave 319 * tables. 320 * 321 * Chances are this is too big, but we don't care. 322 */ 323 size = (cs->sc_ndisks + 1) * sizeof(struct ccdiinfo); 324 cs->sc_itable = g_malloc(size, M_WAITOK | M_ZERO); 325 326 /* 327 * Trivial case: no interleave (actually interleave of disk size). 328 * Each table entry represents a single component in its entirety. 329 * 330 * An interleave of 0 may not be used with a mirror setup. 331 */ 332 if (cs->sc_ileave == 0) { 333 bn = 0; 334 ii = cs->sc_itable; 335 336 for (ix = 0; ix < cs->sc_ndisks; ix++) { 337 /* Allocate space for ii_index. */ 338 ii->ii_index = g_malloc(sizeof(int), M_WAITOK); 339 ii->ii_ndisk = 1; 340 ii->ii_startblk = bn; 341 ii->ii_startoff = 0; 342 ii->ii_index[0] = ix; 343 bn += cs->sc_cinfo[ix].ci_size; 344 ii++; 345 } 346 ii->ii_ndisk = 0; 347 return; 348 } 349 350 /* 351 * The following isn't fast or pretty; it doesn't have to be. 352 */ 353 size = 0; 354 bn = lbn = 0; 355 for (ii = cs->sc_itable; ; ii++) { 356 /* 357 * Allocate space for ii_index. We might allocate more then 358 * we use. 359 */ 360 ii->ii_index = g_malloc((sizeof(int) * cs->sc_ndisks), 361 M_WAITOK); 362 363 /* 364 * Locate the smallest of the remaining components 365 */ 366 smallci = NULL; 367 for (ci = cs->sc_cinfo; ci < &cs->sc_cinfo[cs->sc_ndisks]; 368 ci++) { 369 if (ci->ci_size > size && 370 (smallci == NULL || 371 ci->ci_size < smallci->ci_size)) { 372 smallci = ci; 373 } 374 } 375 376 /* 377 * Nobody left, all done 378 */ 379 if (smallci == NULL) { 380 ii->ii_ndisk = 0; 381 g_free(ii->ii_index); 382 ii->ii_index = NULL; 383 break; 384 } 385 386 /* 387 * Record starting logical block using an sc_ileave blocksize. 388 */ 389 ii->ii_startblk = bn / cs->sc_ileave; 390 391 /* 392 * Record starting component block using an sc_ileave 393 * blocksize. This value is relative to the beginning of 394 * a component disk. 395 */ 396 ii->ii_startoff = lbn; 397 398 /* 399 * Determine how many disks take part in this interleave 400 * and record their indices. 401 */ 402 ix = 0; 403 for (ci = cs->sc_cinfo; 404 ci < &cs->sc_cinfo[cs->sc_ndisks]; ci++) { 405 if (ci->ci_size >= smallci->ci_size) { 406 ii->ii_index[ix++] = ci - cs->sc_cinfo; 407 } 408 } 409 ii->ii_ndisk = ix; 410 bn += ix * (smallci->ci_size - size); 411 lbn = smallci->ci_size / cs->sc_ileave; 412 size = smallci->ci_size; 413 } 414} 415 416static void 417g_ccd_start(struct bio *bp) 418{ 419 long bcount, rcount; 420 struct bio *cbp[2]; 421 caddr_t addr; 422 daddr_t bn; 423 int err; 424 int sent; 425 struct ccd_s *cs; 426 427 cs = bp->bio_to->geom->softc; 428 429 /* 430 * Translate the partition-relative block number to an absolute. 431 */ 432 bn = bp->bio_offset / cs->sc_secsize; 433 434 /* 435 * Allocate component buffers and fire off the requests 436 */ 437 addr = bp->bio_data; 438 sent = 0; 439 for (bcount = bp->bio_length; bcount > 0; bcount -= rcount) { 440 err = ccdbuffer(cbp, cs, bp, bn, addr, bcount); 441 if (err) { 442 printf("ccdbuffer error %d\n", err); 443 if (!sent) 444 biofinish(bp, NULL, err); 445 else { 446 /* 447 * XXX: maybe a race where the partners 448 * XXX: we sent already have been in 449 * XXX: ccdiodone(). Single-threaded g_down 450 * XXX: may protect against this. 451 */ 452 bp->bio_resid -= bcount; 453 bp->bio_error = err; 454 bp->bio_flags |= BIO_ERROR; 455 } 456 return; 457 } 458 rcount = cbp[0]->bio_length; 459 460 if (cs->sc_flags & CCDF_MIRROR) { 461 /* 462 * Mirroring. Writes go to both disks, reads are 463 * taken from whichever disk seems most appropriate. 464 * 465 * We attempt to localize reads to the disk whos arm 466 * is nearest the read request. We ignore seeks due 467 * to writes when making this determination and we 468 * also try to avoid hogging. 469 */ 470 if (cbp[0]->bio_cmd != BIO_READ) { 471 g_io_request(cbp[0], cbp[0]->bio_from); 472 g_io_request(cbp[1], cbp[1]->bio_from); 473 sent++; 474 } else { 475 int pick = cs->sc_pick; 476 daddr_t range = cs->sc_size / 16; 477 478 if (bn < cs->sc_blk[pick] - range || 479 bn > cs->sc_blk[pick] + range 480 ) { 481 cs->sc_pick = pick = 1 - pick; 482 } 483 cs->sc_blk[pick] = bn + btodb(rcount); 484 g_io_request(cbp[pick], cbp[pick]->bio_from); 485 sent++; 486 } 487 } else { 488 /* 489 * Not mirroring 490 */ 491 g_io_request(cbp[0], cbp[0]->bio_from); 492 sent++; 493 } 494 bn += btodb(rcount); 495 addr += rcount; 496 } 497} 498 499/* 500 * Build a component buffer header. 501 */ 502static int 503ccdbuffer(struct bio **cb, struct ccd_s *cs, struct bio *bp, daddr_t bn, caddr_t addr, long bcount) 504{ 505 struct ccdcinfo *ci, *ci2 = NULL; /* XXX */ 506 struct bio *cbp; 507 daddr_t cbn, cboff; 508 off_t cbc; 509 510 /* 511 * Determine which component bn falls in. 512 */ 513 cbn = bn; 514 cboff = 0; 515 516 if (cs->sc_ileave == 0) { 517 /* 518 * Serially concatenated and neither a mirror nor a parity 519 * config. This is a special case. 520 */ 521 daddr_t sblk; 522 523 sblk = 0; 524 for (ci = cs->sc_cinfo; cbn >= sblk + ci->ci_size; ci++) 525 sblk += ci->ci_size; 526 cbn -= sblk; 527 } else { 528 struct ccdiinfo *ii; 529 int ccdisk, off; 530 531 /* 532 * Calculate cbn, the logical superblock (sc_ileave chunks), 533 * and cboff, a normal block offset (DEV_BSIZE chunks) relative 534 * to cbn. 535 */ 536 cboff = cbn % cs->sc_ileave; /* DEV_BSIZE gran */ 537 cbn = cbn / cs->sc_ileave; /* DEV_BSIZE * ileave gran */ 538 539 /* 540 * Figure out which interleave table to use. 541 */ 542 for (ii = cs->sc_itable; ii->ii_ndisk; ii++) { 543 if (ii->ii_startblk > cbn) 544 break; 545 } 546 ii--; 547 548 /* 549 * off is the logical superblock relative to the beginning 550 * of this interleave block. 551 */ 552 off = cbn - ii->ii_startblk; 553 554 /* 555 * We must calculate which disk component to use (ccdisk), 556 * and recalculate cbn to be the superblock relative to 557 * the beginning of the component. This is typically done by 558 * adding 'off' and ii->ii_startoff together. However, 'off' 559 * must typically be divided by the number of components in 560 * this interleave array to be properly convert it from a 561 * CCD-relative logical superblock number to a 562 * component-relative superblock number. 563 */ 564 if (ii->ii_ndisk == 1) { 565 /* 566 * When we have just one disk, it can't be a mirror 567 * or a parity config. 568 */ 569 ccdisk = ii->ii_index[0]; 570 cbn = ii->ii_startoff + off; 571 } else { 572 if (cs->sc_flags & CCDF_MIRROR) { 573 /* 574 * We have forced a uniform mapping, resulting 575 * in a single interleave array. We double 576 * up on the first half of the available 577 * components and our mirror is in the second 578 * half. This only works with a single 579 * interleave array because doubling up 580 * doubles the number of sectors, so there 581 * cannot be another interleave array because 582 * the next interleave array's calculations 583 * would be off. 584 */ 585 int ndisk2 = ii->ii_ndisk / 2; 586 ccdisk = ii->ii_index[off % ndisk2]; 587 cbn = ii->ii_startoff + off / ndisk2; 588 ci2 = &cs->sc_cinfo[ccdisk + ndisk2]; 589 } else { 590 ccdisk = ii->ii_index[off % ii->ii_ndisk]; 591 cbn = ii->ii_startoff + off / ii->ii_ndisk; 592 } 593 } 594 595 ci = &cs->sc_cinfo[ccdisk]; 596 597 /* 598 * Convert cbn from a superblock to a normal block so it 599 * can be used to calculate (along with cboff) the normal 600 * block index into this particular disk. 601 */ 602 cbn *= cs->sc_ileave; 603 } 604 605 /* 606 * Fill in the component buf structure. 607 */ 608 cbp = g_clone_bio(bp); 609 /* XXX: check for NULL */ 610 cbp->bio_done = g_std_done; 611 cbp->bio_offset = dbtob(cbn + cboff + CCD_OFFSET); 612 cbp->bio_data = addr; 613 if (cs->sc_ileave == 0) 614 cbc = dbtob((off_t)(ci->ci_size - cbn)); 615 else 616 cbc = dbtob((off_t)(cs->sc_ileave - cboff)); 617 cbp->bio_length = (cbc < bcount) ? cbc : bcount; 618 619 cbp->bio_from = ci->ci_consumer; 620 cb[0] = cbp; 621 622 if (cs->sc_flags & CCDF_MIRROR) { 623 cbp = g_clone_bio(bp); 624 /* XXX: check for NULL */ 625 cbp->bio_done = cb[0]->bio_done = ccdiodone; 626 cbp->bio_offset = cb[0]->bio_offset; 627 cbp->bio_data = cb[0]->bio_data; 628 cbp->bio_length = cb[0]->bio_length; 629 cbp->bio_from = ci2->ci_consumer; 630 cbp->bio_caller1 = cb[0]; 631 cb[0]->bio_caller1 = cbp; 632 cb[1] = cbp; 633 } 634 return (0); 635} 636 637/* 638 * Called only for mirrored operations. 639 */ 640static void 641ccdiodone(struct bio *cbp) 642{ 643 struct bio *mbp, *pbp; 644 645 mbp = cbp->bio_caller1; 646 pbp = cbp->bio_parent; 647 648 if (pbp->bio_cmd == BIO_READ) { 649 if (cbp->bio_error == 0) { 650 /* We will not be needing the partner bio */ 651 if (mbp != NULL) { 652 pbp->bio_inbed++; 653 g_destroy_bio(mbp); 654 } 655 g_std_done(cbp); 656 return; 657 } 658 if (mbp != NULL) { 659 /* Try partner the bio instead */ 660 mbp->bio_caller1 = NULL; 661 pbp->bio_inbed++; 662 g_destroy_bio(cbp); 663 g_io_request(mbp, mbp->bio_from); 664 /* 665 * XXX: If this comes back OK, we should actually 666 * try to write the good data on the failed mirror 667 */ 668 return; 669 } 670 g_std_done(cbp); 671 } 672 if (mbp != NULL) { 673 mbp->bio_caller1 = NULL; 674 pbp->bio_inbed++; 675 if (cbp->bio_error != 0 && pbp->bio_error == 0) 676 pbp->bio_error = cbp->bio_error; 677 return; 678 } 679 g_std_done(cbp); 680} 681 682static void 683g_ccd_create(struct gctl_req *req, struct g_class *mp) 684{ 685 int *unit, *ileave, *nprovider; 686 struct g_geom *gp; 687 struct g_consumer *cp; 688 struct g_provider *pp; 689 struct ccd_s *sc; 690 struct sbuf *sb; 691 char buf[20]; 692 int i, error; 693 694 g_topology_assert(); 695 unit = gctl_get_paraml(req, "unit", sizeof (*unit)); 696 ileave = gctl_get_paraml(req, "ileave", sizeof (*ileave)); 697 nprovider = gctl_get_paraml(req, "nprovider", sizeof (*nprovider)); 698 699 /* Check for duplicate unit */ 700 LIST_FOREACH(gp, &mp->geom, geom) { 701 sc = gp->softc; 702 if (sc->sc_unit == *unit) { 703 gctl_error(req, "Unit %d already configured", *unit); 704 return; 705 } 706 } 707 708 if (*nprovider <= 0) { 709 gctl_error(req, "Bogus nprovider argument (= %d)", *nprovider); 710 return; 711 } 712 713 /* Check all providers are valid */ 714 for (i = 0; i < *nprovider; i++) { 715 sprintf(buf, "provider%d", i); 716 pp = gctl_get_provider(req, buf); 717 if (pp == NULL) 718 return; 719 } 720 721 gp = g_new_geomf(mp, "ccd%d", *unit); 722 gp->start = g_ccd_start; 723 gp->orphan = g_ccd_orphan; 724 gp->access = g_ccd_access; 725 sc = g_malloc(sizeof *sc, M_WAITOK | M_ZERO); 726 gp->softc = sc; 727 sc->sc_ndisks = *nprovider; 728 729 /* Allocate space for the component info. */ 730 sc->sc_cinfo = g_malloc(sc->sc_ndisks * sizeof(struct ccdcinfo), 731 M_WAITOK | M_ZERO); 732 733 /* Create consumers and attach to all providers */ 734 for (i = 0; i < *nprovider; i++) { 735 sprintf(buf, "provider%d", i); 736 pp = gctl_get_provider(req, buf); 737 cp = g_new_consumer(gp); 738 error = g_attach(cp, pp); 739 KASSERT(error == 0, ("attach to %s failed", pp->name)); 740 sc->sc_cinfo[i].ci_consumer = cp; 741 sc->sc_cinfo[i].ci_provider = pp; 742 } 743 744 sc->sc_unit = *unit; 745 sc->sc_ileave = *ileave; 746 747 if (gctl_get_param(req, "uniform", NULL)) 748 sc->sc_flags |= CCDF_UNIFORM; 749 if (gctl_get_param(req, "mirror", NULL)) 750 sc->sc_flags |= CCDF_MIRROR; 751 752 if (sc->sc_ileave == 0 && (sc->sc_flags & CCDF_MIRROR)) { 753 printf("%s: disabling mirror, interleave is 0\n", gp->name); 754 sc->sc_flags &= ~(CCDF_MIRROR); 755 } 756 757 if ((sc->sc_flags & CCDF_MIRROR) && !(sc->sc_flags & CCDF_UNIFORM)) { 758 printf("%s: mirror/parity forces uniform flag\n", gp->name); 759 sc->sc_flags |= CCDF_UNIFORM; 760 } 761 762 error = ccdinit(req, sc); 763 if (error != 0) { 764 g_ccd_freesc(sc); 765 gp->softc = NULL; 766 g_wither_geom(gp, ENXIO); 767 return; 768 } 769 770 pp = g_new_providerf(gp, "%s", gp->name); 771 pp->mediasize = sc->sc_size * (off_t)sc->sc_secsize; 772 pp->sectorsize = sc->sc_secsize; 773 g_error_provider(pp, 0); 774 775 sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND); 776 sbuf_clear(sb); 777 sbuf_printf(sb, "ccd%d: %d components ", sc->sc_unit, *nprovider); 778 for (i = 0; i < *nprovider; i++) { 779 sbuf_printf(sb, "%s%s", 780 i == 0 ? "(" : ", ", 781 sc->sc_cinfo[i].ci_provider->name); 782 } 783 sbuf_printf(sb, "), %jd blocks ", (off_t)pp->mediasize / DEV_BSIZE); 784 if (sc->sc_ileave != 0) 785 sbuf_printf(sb, "interleaved at %d blocks\n", 786 sc->sc_ileave); 787 else 788 sbuf_printf(sb, "concatenated\n"); 789 sbuf_finish(sb); 790 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 791 sbuf_delete(sb); 792} 793 794static void 795g_ccd_destroy(struct gctl_req *req, struct g_class *mp) 796{ 797 struct g_geom *gp; 798 struct g_provider *pp; 799 struct ccd_s *sc; 800 801 g_topology_assert(); 802 gp = gctl_get_geom(req, mp, "geom"); 803 if (gp == NULL) 804 return; 805 sc = gp->softc; 806 pp = LIST_FIRST(&gp->provider); 807 if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) { 808 gctl_error(req, "%s is open(r%dw%de%d)", gp->name, 809 pp->acr, pp->acw, pp->ace); 810 return; 811 } 812 g_ccd_freesc(sc); 813 gp->softc = NULL; 814 g_wither_geom(gp, ENXIO); 815} 816 817static void 818g_ccd_list(struct gctl_req *req, struct g_class *mp) 819{ 820 struct sbuf *sb; 821 struct ccd_s *cs; 822 struct g_geom *gp; 823 int i, unit, *up; 824 825 up = gctl_get_paraml(req, "unit", sizeof (int)); 826 unit = *up; 827 sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND); 828 sbuf_clear(sb); 829 LIST_FOREACH(gp, &mp->geom, geom) { 830 cs = gp->softc; 831 if (unit >= 0 && unit != cs->sc_unit) 832 continue; 833 sbuf_printf(sb, "ccd%d\t\t%d\t%d\t", 834 cs->sc_unit, cs->sc_ileave, cs->sc_flags & CCDF_USERMASK); 835 836 for (i = 0; i < cs->sc_ndisks; ++i) { 837 sbuf_printf(sb, "%s/dev/%s", i == 0 ? "" : " ", 838 cs->sc_cinfo[i].ci_provider->name); 839 } 840 sbuf_printf(sb, "\n"); 841 } 842 sbuf_finish(sb); 843 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 844 sbuf_delete(sb); 845} 846 847static void 848g_ccd_config(struct gctl_req *req, struct g_class *mp, char const *verb) 849{ 850 851 g_topology_assert(); 852 if (!strcmp(verb, "create geom")) { 853 g_ccd_create(req, mp); 854 } else if (!strcmp(verb, "destroy geom")) { 855 g_ccd_destroy(req, mp); 856 } else if (!strcmp(verb, "list")) { 857 g_ccd_list(req, mp); 858 } else { 859 gctl_error(req, "unknown verb"); 860 } 861} 862 863static struct g_class g_ccd_class = { 864 .name = "CCD", 865 .ctlreq = g_ccd_config, 866}; 867 868DECLARE_GEOM_CLASS(g_ccd_class, g_ccd); 869