/* g_part.c revision 169398 */
/*-
 * Copyright (c) 2002, 2005, 2006, 2007 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/geom/part/g_part.c 169398 2007-05-09 01:46:42Z marcel $");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/diskmbr.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/systm.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_ctl.h>
#include <geom/part/g_part.h>

#include "g_part_if.h"

/*
 * The "null" scheme stands in for the absence of a partition table.  It
 * is attached while no scheme has (yet) recognized the media, and after
 * a table has been destroyed but the change not yet committed, so the
 * rest of the code can treat "no table" as just another scheme.
 */
static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};

static struct g_part_scheme g_part_null_scheme = {
	"n/a",
	g_part_null_methods,
	sizeof(struct g_part_table),
};
G_PART_SCHEME_DECLARE(g_part_null_scheme);

SET_DECLARE(g_part_scheme_set, struct g_part_scheme);

/* Mapping between partition-type alias names and their enum values. */
struct g_part_alias_list {
	const char *lexeme;
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "efi", G_PART_ALIAS_EFI },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "mbr", G_PART_ALIAS_MBR }
};

/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;

static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);

/* Control verbs dispatched by g_part_ctlreq(). */
enum g_part_ctl {
	G_PART_CTL_NONE,
	G_PART_CTL_ADD,
	G_PART_CTL_COMMIT,
	G_PART_CTL_CREATE,
	G_PART_CTL_DELETE,
	G_PART_CTL_DESTROY,
	G_PART_CTL_MODIFY,
	G_PART_CTL_MOVE,
	G_PART_CTL_RECOVER,
	G_PART_CTL_RESIZE,
	G_PART_CTL_UNDO
};

/*
 * Support functions.
 */

static void g_part_wither(struct g_geom *, int);

/*
 * Return the lexeme for the given partition-type alias, or NULL when
 * the alias is not in g_part_alias_list[].
 */
const char *
g_part_alias_name(enum g_part_alias alias)
{
	int i;

	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
		if (g_part_alias_list[i].alias != alias)
			continue;
		return (g_part_alias_list[i].lexeme);
	}

	return (NULL);
}

/*
 * Find the table entry with the given index, creating it if it does
 * not exist.  The entry list is kept sorted by index; a newly created
 * entry is inserted at its sorted position.  The entry's start/end
 * sectors are (re)set in either case.
 */
struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
	struct g_part_entry *entry, *last;

	last = NULL;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
			break;
		if (entry->gpe_index > index) {
			/* Passed the sorted position: no existing entry. */
			entry = NULL;
			break;
		}
		last = entry;
	}
	if (entry == NULL) {
		entry = g_malloc(table->gpt_scheme->gps_entrysz,
		    M_WAITOK | M_ZERO);
		entry->gpe_index = index;
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
	}
	entry->gpe_start = start;
	entry->gpe_end = end;
	return (entry);
}

/*
 * Create (or refresh) the provider for a partition entry, deriving its
 * geometry from the underlying provider, and mark it error-free so it
 * becomes usable.
 */
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	char buf[32];
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	entry->gpe_offset = entry->gpe_start * pp->sectorsize;

	if (entry->gpe_pp == NULL) {
		entry->gpe_pp = g_new_providerf(gp, "%s%s", gp->name,
		    G_PART_NAME(table, entry, buf, sizeof(buf)));
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE;
	if (pp->stripesize > 0) {
		entry->gpe_pp->stripesize = pp->stripesize;
		entry->gpe_pp->stripeoffset = (pp->stripeoffset +
		    entry->gpe_offset) % pp->stripesize;
	}
	g_error_provider(entry->gpe_pp, 0);
}

/*
 * Parse a geom name; the geom must belong to the PART class.
 * Returns EINVAL when no such geom exists.
 */
static int
g_part_parm_geom(const char *p, struct g_geom **v)
{
	struct g_geom *gp;

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		if (!strcmp(p, gp->name))
			break;
	}
	if (gp == NULL)
		return (EINVAL);
	*v = gp;
	return (0);
}

/* Parse a provider name.  Returns EINVAL when the provider is unknown. */
static int
g_part_parm_provider(const char *p, struct g_provider **v)
{
	struct g_provider *pp;

	pp = g_provider_by_name(p);
	if (pp == NULL)
		return (EINVAL);
	*v = pp;
	return (0);
}

/*
 * Parse a non-negative quad (64-bit) number.  The whole string must be
 * consumed by strtoq() for the value to be accepted.
 */
static int
g_part_parm_quad(const char *p, quad_t *v)
{
	char *x;
	quad_t q;

	q = strtoq(p, &x, 0);
	if (*x != '\0' || q < 0)
		return (EINVAL);
	*v = q;
	return (0);
}

/* Parse a scheme name (case-insensitive) from the linker set. */
static int
g_part_parm_scheme(const char *p, struct g_part_scheme **v)
{
	struct g_part_scheme **iter, *s;

	s = NULL;
	SET_FOREACH(iter, g_part_scheme_set) {
		if ((*iter)->name == NULL)
			continue;
		if (!strcasecmp((*iter)->name, p)) {
			s = *iter;
			break;
		}
	}
	if (s == NULL)
		return (EINVAL);
	*v = s;
	return (0);
}

/* Parse a non-empty string parameter. */
static int
g_part_parm_str(const char *p, const char **v)
{

	if (p[0] == '\0')
		return (EINVAL);
	*v = p;
	return (0);
}

/* Parse an unsigned integer, restricted to the range [0, INT_MAX]. */
static int
g_part_parm_uint(const char *p, u_int *v)
{
	char *x;
	long l;

	l = strtol(p, &x, 0);
	if (*x != '\0' || l < 0 || l > INT_MAX)
		return (EINVAL);
	*v = (unsigned int)l;
	return (0);
}

/*
 * Probe the consumer with every registered scheme and leave the table
 * of the best match (the probe priority closest to zero, with <= 0
 * meaning success) in gp->softc.  A pre-existing table is re-probed
 * first and kept if it still matches exactly (priority 0).  Returns
 * ENXIO when no scheme recognizes the media.
 */
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme **iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : &g_part_null_scheme;
	pri = (scheme != &g_part_null_scheme) ? G_PART_PROBE(table, cp) :
	    INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = &g_part_null_scheme;
		pri = INT_MIN;
	}

	SET_FOREACH(iter, g_part_scheme_set) {
		if ((*iter) == &g_part_null_scheme)
			continue;
		table = (void *)kobj_create((kobj_class_t)(*iter), M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = *iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			/* Better match: replace any previous candidate. */
			pri = probe;
			scheme = *iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == &g_part_null_scheme) ? ENXIO : 0);
}

/*
 * Control request functions.
 */

/*
 * Handle the "add" verb: validate the requested start/size/index
 * against the table bounds and existing entries, allocate (or revive a
 * deleted) entry, call the scheme's add method and create a provider
 * for the new partition.  The chosen index is returned to userland via
 * the "index" request parameter.
 */
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	char buf[16];
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	/* Bounds checks against the usable sector range of the table. */
	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	/*
	 * Walk the (index-sorted) entry list to find a free index and to
	 * check that the new partition does not overlap a live entry.
	 * A deleted entry with a matching index can be reused.
	 */
	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index) {
			index = entry->gpe_index + 1;
			last = entry;
		}
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	/* Report the index actually used back to the caller. */
	snprintf(buf, sizeof(buf), "%d", index);
	gctl_set_param(req, "index", buf, strlen(buf) + 1);

	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		/* Reused a deleted entry: revive and mark it modified. */
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);
	return (0);
}

/*
 * Handle the "commit" verb: zero any scheme-metadata sectors recorded
 * in the head/tail sector maps, write the table to disk (or wither the
 * geom if the table was destroyed), clear the created/modified/deleted
 * state of all entries and drop the write access obtained when the
 * table was opened.
 */
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		/* Scrub sectors flagged in the sector bitmaps. */
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			/* Tail map counts sectors back from the media end. */
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	if (table->gpt_scheme == &g_part_null_scheme) {
		/* Committing a destroyed table: tear down the geom. */
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	gctl_error(req, "%d", error);
	return (error);
}

/*
 * Handle the "create" verb: create a new partition table of the given
 * scheme on a provider.  If a null-scheme geom already sits on the
 * provider its consumer and open/sector-map state are taken over;
 * otherwise a fresh geom and consumer are created with write access.
 */
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	error = g_part_parm_geom(pp->name, &gp);
	if (!error) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	     gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		/* Inherit state from the null-scheme table we replace. */
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		/* Restore the null table we were about to replace. */
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}

/*
 * Handle the "delete" verb: remove the partition with the given index,
 * provided its provider is not open.  A never-committed entry is freed
 * outright; a committed one is only marked deleted until commit.
 */
static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	pp = entry->gpe_pp;
	if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	pp->private = NULL;
	entry->gpe_pp = NULL;
	if (entry->gpe_created) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	g_wither_provider(pp, ENXIO);
	return (0);
}

/*
 * Handle the "destroy" verb: destroy the partition table.  Only
 * allowed when no live entries remain.  The table is replaced by a
 * null-scheme table (inheriting open and sector-map state) so that a
 * subsequent commit can scrub the metadata and wither the geom.
 */
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *null, *table;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted)
			continue;
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);
	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	return (0);
}

/*
 * Handle the "modify" verb: let the scheme change the type and/or
 * label of the entry with the given index.
 */
static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;
	return (0);
}

/* The "move" verb is not implemented. */
static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}

/* The "recover" verb is not implemented. */
static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'recover'", ENOSYS);
	return (ENOSYS);
}

/* The "resize" verb is not implemented. */
static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'resize'", ENOSYS);
	return (ENOSYS);
}

/*
 * Handle the "undo" verb: throw away all uncommitted changes.  Created
 * and deleted entries are removed; if the table itself was created or
 * destroyed the on-disk table is re-probed and re-read, otherwise the
 * in-core table is re-read from disk.  Write access is dropped.
 */
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			pp = entry->gpe_pp;
			pp->private = NULL;
			entry->gpe_pp = NULL;
			g_wither_provider(pp, ENXIO);
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		if (!LIST_EMPTY(&table->gpt_entry)) {
			error = EBUSY;
			goto fail;
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			/* Nothing on disk either: tear the geom down. */
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry)
		g_part_new_provider(gp, table, entry);

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

/*
 * Free all entries and the table attached to the geom, then wither the
 * geom itself with the given error.
 */
static void
g_part_wither(struct g_geom *gp, int error)
{
	struct g_part_entry *entry;
	struct g_part_table *table;

	table = gp->softc;
	if (table != NULL) {
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		if (gp->softc != NULL) {
			kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = NULL;
		}
	}
	g_wither_geom(gp, error);
}

/*
 * Class methods.
 */

/*
 * Class-level control request dispatcher: decode the verb, determine
 * its mandatory (mparms) and optional (oparms) parameters, parse and
 * validate every request argument into a g_part_parms, obtain write
 * access to the geom for modifying verbs, and call the matching
 * g_part_ctl_*() handler.
 */
static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	const char *p;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int error, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	ctlreq = G_PART_CTL_NONE;
	modifies = 0;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	/* Map the verb onto a request code and its parameter sets. */
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			modifies = 1;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			modifies = 1;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			modifies = 1;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			modifies = 1;
			mparms |= G_PART_PARM_GEOM;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			modifies = 1;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			modifies = 1;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			modifies = 1;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			modifies = 1;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}

	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		/* Identify the parameter by name. */
		switch (ap->name[0]) {
		case 'c':
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			break;
		case 'g':
			if (!strcmp(ap->name, "geom"))
				parm = G_PART_PARM_GEOM;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 'p':
			if (!strcmp(ap->name, "provider"))
				parm = G_PART_PARM_PROVIDER;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		p = gctl_get_asciiparam(req, ap->name);
		if (p == NULL) {
			gctl_error(req, "%d param '%s'", ENOATTR, ap->name);
			return;
		}
		/* Parse the parameter value into the right gpp field. */
		switch (parm) {
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_uint(p, &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			error = g_part_parm_str(p, &gpp.gpp_flags);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(p, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_uint(p, &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			error = g_part_parm_str(p, &gpp.gpp_label);
			break;
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(p, &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(p, &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(p, &gpp.gpp_size);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(p, &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(p, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint(p, &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			break;
		}
		if (error) {
			gctl_error(req, "%d %s '%s'", error, ap->name, p);
			return;
		}
		gpp.gpp_parms |= parm;
	}
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}

	/* Obtain permissions if possible/necessary. */
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
		}
	}

	error = EDOOFUS;	/* Prevent bogus uninit. warning. */
	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
		break;
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
		break;
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
		break;
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
		break;
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
		break;
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
		break;
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
		break;
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
		break;
	}
}

/* Class method: destroy the given geom on request. */
static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}

/*
 * Class method: taste a provider.  Attach to it, probe for a known
 * partitioning scheme, read the table and create a provider for each
 * entry.  Returns the new geom, or NULL if the media is not ours or
 * nesting is not allowed.
 */
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		g_part_wither(gp, error);
		return (NULL);
	}

	g_topology_unlock();

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;
	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry)
		g_part_new_provider(gp, table, entry);

	g_access(cp, -1, 0, 0);
	return (gp);

fail:
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	g_part_wither(gp, error);
	return (NULL);
}

/*
 * Geom methods.
 */

/* Geom method: pass access requests down to the underlying provider. */
static int
g_part_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp;

	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
	    dw, de));

	cp = LIST_FIRST(&pp->geom->consumer);

	/* We always gain write-exclusive access. */
	return (g_access(cp, dr, dw, dw + de));
}

/*
 * Geom method: emit configuration.  Handles the three dumpconf cases:
 * kernel dump one-liner (indent == NULL), consumer config (none), and
 * provider/geom XML sections, delegating scheme specifics to
 * G_PART_DUMPCONF().
 */
static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, (__func__));
	table = gp->softc;

	if (indent == NULL) {
		KASSERT(cp == NULL && pp != NULL, (__func__));
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, (__func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}

/* Geom method: the underlying provider went away; wither our geom. */
static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;

	pp = cp->provider;
	KASSERT(pp != NULL, (__func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	KASSERT(pp->error != 0, (__func__));
	g_part_wither(cp->geom, pp->error);
}

/* Geom method: the underlying provider was spoiled; wither our geom. */
static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	g_topology_assert();

	g_part_wither(cp->geom, ENXIO);
}

/*
 * Geom method: handle I/O.  Reads, writes and deletes are clipped to
 * the partition and passed down shifted by the partition offset.
 * GEOM::kerneldump, PART::isleaf and PART::depth attributes are
 * answered here; other requests are cloned and forwarded.
 */
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;
	int attr;

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch(bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		/* Clip the request to the partition boundary. */
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used.
			 */
			if (!G_PART_DUMPTO(table, entry)) {
				g_io_deliver(bp, ENXIO);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		} else if (!strcmp("PART::isleaf", bp->bio_attribute)) {
			if (bp->bio_length != sizeof(int)) {
				g_io_deliver(bp, EFAULT);
				return;
			}
			attr = table->gpt_isleaf ? 1 : 0;
			bcopy(&attr, bp->bio_data, sizeof(int));
			bp->bio_completed = sizeof(int);
			g_io_deliver(bp, 0);
			return;
		} else if (!strcmp("PART::depth", bp->bio_attribute)) {
			if (bp->bio_length != sizeof(int)) {
				g_io_deliver(bp, EFAULT);
				return;
			}
			bcopy(&table->gpt_depth, bp->bio_data, sizeof(int));
			bp->bio_completed = sizeof(int);
			g_io_deliver(bp, 0);
			return;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/* Forward BIO_FLUSH and unhandled BIO_GETATTR downward. */
	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}