g_part.c revision 200534
/*-
 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/geom/part/g_part.c 200534 2009-12-14 20:04:06Z rpaulo $");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/diskmbr.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/systm.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_ctl.h>
#include <geom/geom_int.h>
#include <geom/part/g_part.h>

#include "g_part_if.h"

#ifndef _PATH_DEV
#define _PATH_DEV "/dev/"
#endif

/*
 * Placeholder "(none)" scheme: a kobj class with no methods, used where
 * no real partitioning scheme is attached (see g_part_ctl_destroy and
 * g_part_probe, which compare against g_part_null_scheme).
 */
static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};

static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};

/* List of all registered partitioning schemes. */
TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);

/*
 * Mapping from user-visible partition type names (lexemes) to the
 * scheme-independent alias enumeration.  Looked up by
 * g_part_alias_name().
 */
struct g_part_alias_list {
	const char *lexeme;
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "apple-boot", G_PART_ALIAS_APPLE_BOOT },
	{ "apple-hfs", G_PART_ALIAS_APPLE_HFS },
	{ "apple-label", G_PART_ALIAS_APPLE_LABEL },
	{ "apple-raid", G_PART_ALIAS_APPLE_RAID },
	{ "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
	{ "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
	{ "apple-ufs", G_PART_ALIAS_APPLE_UFS },
	{ "efi", G_PART_ALIAS_EFI },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "linux-data", G_PART_ALIAS_LINUX_DATA },
	{ "linux-lvm", G_PART_ALIAS_LINUX_LVM },
	{ "linux-raid", G_PART_ALIAS_LINUX_RAID },
	{ "linux-swap", G_PART_ALIAS_LINUX_SWAP },
	{ "mbr", G_PART_ALIAS_MBR }
};
* The GEOM partitioning class. 95 */ 96static g_ctl_req_t g_part_ctlreq; 97static g_ctl_destroy_geom_t g_part_destroy_geom; 98static g_fini_t g_part_fini; 99static g_init_t g_part_init; 100static g_taste_t g_part_taste; 101 102static g_access_t g_part_access; 103static g_dumpconf_t g_part_dumpconf; 104static g_orphan_t g_part_orphan; 105static g_spoiled_t g_part_spoiled; 106static g_start_t g_part_start; 107 108static struct g_class g_part_class = { 109 .name = "PART", 110 .version = G_VERSION, 111 /* Class methods. */ 112 .ctlreq = g_part_ctlreq, 113 .destroy_geom = g_part_destroy_geom, 114 .fini = g_part_fini, 115 .init = g_part_init, 116 .taste = g_part_taste, 117 /* Geom methods. */ 118 .access = g_part_access, 119 .dumpconf = g_part_dumpconf, 120 .orphan = g_part_orphan, 121 .spoiled = g_part_spoiled, 122 .start = g_part_start, 123}; 124 125DECLARE_GEOM_CLASS(g_part_class, g_part); 126 127/* 128 * Support functions. 129 */ 130 131static void g_part_wither(struct g_geom *, int); 132 133const char * 134g_part_alias_name(enum g_part_alias alias) 135{ 136 int i; 137 138 for (i = 0; i < G_PART_ALIAS_COUNT; i++) { 139 if (g_part_alias_list[i].alias != alias) 140 continue; 141 return (g_part_alias_list[i].lexeme); 142 } 143 144 return (NULL); 145} 146 147void 148g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs, 149 u_int *bestheads) 150{ 151 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 }; 152 off_t chs, cylinders; 153 u_int heads; 154 int idx; 155 156 *bestchs = 0; 157 *bestheads = 0; 158 for (idx = 0; candidate_heads[idx] != 0; idx++) { 159 heads = candidate_heads[idx]; 160 cylinders = blocks / heads / sectors; 161 if (cylinders < heads || cylinders < sectors) 162 break; 163 if (cylinders > 1023) 164 continue; 165 chs = cylinders * heads * sectors; 166 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) { 167 *bestchs = chs; 168 *bestheads = heads; 169 } 170 } 171} 172 173static void 174g_part_geometry(struct g_part_table 
*table, struct g_consumer *cp, 175 off_t blocks) 176{ 177 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 }; 178 off_t chs, bestchs; 179 u_int heads, sectors; 180 int idx; 181 182 if (g_getattr("GEOM::fwsectors", cp, §ors) != 0 || sectors == 0 || 183 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) { 184 table->gpt_fixgeom = 0; 185 table->gpt_heads = 0; 186 table->gpt_sectors = 0; 187 bestchs = 0; 188 for (idx = 0; candidate_sectors[idx] != 0; idx++) { 189 sectors = candidate_sectors[idx]; 190 g_part_geometry_heads(blocks, sectors, &chs, &heads); 191 if (chs == 0) 192 continue; 193 /* 194 * Prefer a geometry with sectors > 1, but only if 195 * it doesn't bump down the numbver of heads to 1. 196 */ 197 if (chs > bestchs || (chs == bestchs && heads > 1 && 198 table->gpt_sectors == 1)) { 199 bestchs = chs; 200 table->gpt_heads = heads; 201 table->gpt_sectors = sectors; 202 } 203 } 204 /* 205 * If we didn't find a geometry at all, then the disk is 206 * too big. This means we can use the maximum number of 207 * heads and sectors. 
208 */ 209 if (bestchs == 0) { 210 table->gpt_heads = 255; 211 table->gpt_sectors = 63; 212 } 213 } else { 214 table->gpt_fixgeom = 1; 215 table->gpt_heads = heads; 216 table->gpt_sectors = sectors; 217 } 218} 219 220struct g_part_entry * 221g_part_new_entry(struct g_part_table *table, int index, quad_t start, 222 quad_t end) 223{ 224 struct g_part_entry *entry, *last; 225 226 last = NULL; 227 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 228 if (entry->gpe_index == index) 229 break; 230 if (entry->gpe_index > index) { 231 entry = NULL; 232 break; 233 } 234 last = entry; 235 } 236 if (entry == NULL) { 237 entry = g_malloc(table->gpt_scheme->gps_entrysz, 238 M_WAITOK | M_ZERO); 239 entry->gpe_index = index; 240 if (last == NULL) 241 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 242 else 243 LIST_INSERT_AFTER(last, entry, gpe_entry); 244 } else 245 entry->gpe_offset = 0; 246 entry->gpe_start = start; 247 entry->gpe_end = end; 248 return (entry); 249} 250 251static void 252g_part_new_provider(struct g_geom *gp, struct g_part_table *table, 253 struct g_part_entry *entry) 254{ 255 struct g_consumer *cp; 256 struct g_provider *pp; 257 struct sbuf *sb; 258 off_t offset; 259 260 cp = LIST_FIRST(&gp->consumer); 261 pp = cp->provider; 262 263 offset = entry->gpe_start * pp->sectorsize; 264 if (entry->gpe_offset < offset) 265 entry->gpe_offset = offset; 266 267 if (entry->gpe_pp == NULL) { 268 sb = sbuf_new_auto(); 269 G_PART_FULLNAME(table, entry, sb, gp->name); 270 sbuf_finish(sb); 271 entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb)); 272 sbuf_delete(sb); 273 entry->gpe_pp->private = entry; /* Close the circle. */ 274 } 275 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. 
*/ 276 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) * 277 pp->sectorsize; 278 entry->gpe_pp->mediasize -= entry->gpe_offset - offset; 279 entry->gpe_pp->sectorsize = pp->sectorsize; 280 entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE; 281 if (pp->stripesize > 0) { 282 entry->gpe_pp->stripesize = pp->stripesize; 283 entry->gpe_pp->stripeoffset = (pp->stripeoffset + 284 entry->gpe_offset) % pp->stripesize; 285 } 286 g_error_provider(entry->gpe_pp, 0); 287} 288 289static int 290g_part_parm_geom(const char *rawname, struct g_geom **v) 291{ 292 struct g_geom *gp; 293 const char *pname; 294 295 if (strncmp(rawname, _PATH_DEV, strlen(_PATH_DEV)) == 0) 296 pname = rawname + strlen(_PATH_DEV); 297 else 298 pname = rawname; 299 LIST_FOREACH(gp, &g_part_class.geom, geom) { 300 if (!strcmp(pname, gp->name)) 301 break; 302 } 303 if (gp == NULL) 304 return (EINVAL); 305 *v = gp; 306 return (0); 307} 308 309static int 310g_part_parm_provider(const char *pname, struct g_provider **v) 311{ 312 struct g_provider *pp; 313 314 if (strncmp(pname, _PATH_DEV, strlen(_PATH_DEV)) == 0) 315 pp = g_provider_by_name(pname + strlen(_PATH_DEV)); 316 else 317 pp = g_provider_by_name(pname); 318 if (pp == NULL) 319 return (EINVAL); 320 *v = pp; 321 return (0); 322} 323 324static int 325g_part_parm_quad(const char *p, quad_t *v) 326{ 327 char *x; 328 quad_t q; 329 330 q = strtoq(p, &x, 0); 331 if (*x != '\0' || q < 0) 332 return (EINVAL); 333 *v = q; 334 return (0); 335} 336 337static int 338g_part_parm_scheme(const char *p, struct g_part_scheme **v) 339{ 340 struct g_part_scheme *s; 341 342 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) { 343 if (s == &g_part_null_scheme) 344 continue; 345 if (!strcasecmp(s->name, p)) 346 break; 347 } 348 if (s == NULL) 349 return (EINVAL); 350 *v = s; 351 return (0); 352} 353 354static int 355g_part_parm_str(const char *p, const char **v) 356{ 357 358 if (p[0] == '\0') 359 return (EINVAL); 360 *v = p; 361 return (0); 362} 363 364static 
int 365g_part_parm_uint(const char *p, u_int *v) 366{ 367 char *x; 368 long l; 369 370 l = strtol(p, &x, 0); 371 if (*x != '\0' || l < 0 || l > INT_MAX) 372 return (EINVAL); 373 *v = (unsigned int)l; 374 return (0); 375} 376 377static int 378g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth) 379{ 380 struct g_part_scheme *iter, *scheme; 381 struct g_part_table *table; 382 int pri, probe; 383 384 table = gp->softc; 385 scheme = (table != NULL) ? table->gpt_scheme : NULL; 386 pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN; 387 if (pri == 0) 388 goto done; 389 if (pri > 0) { /* error */ 390 scheme = NULL; 391 pri = INT_MIN; 392 } 393 394 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 395 if (iter == &g_part_null_scheme) 396 continue; 397 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM, 398 M_WAITOK); 399 table->gpt_gp = gp; 400 table->gpt_scheme = iter; 401 table->gpt_depth = depth; 402 probe = G_PART_PROBE(table, cp); 403 if (probe <= 0 && probe > pri) { 404 pri = probe; 405 scheme = iter; 406 if (gp->softc != NULL) 407 kobj_delete((kobj_t)gp->softc, M_GEOM); 408 gp->softc = table; 409 if (pri == 0) 410 goto done; 411 } else 412 kobj_delete((kobj_t)table, M_GEOM); 413 } 414 415done: 416 return ((scheme == NULL) ? ENXIO : 0); 417} 418 419/* 420 * Control request functions. 
/*
 * Handle the 'add' verb: validate the requested range and index against
 * the table and existing entries, then create (or resurrect a deleted)
 * entry and publish its provider.
 */
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	/* The requested range must lie within the table's usable area. */
	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	/*
	 * Walk the entry list: remember a deleted entry with the wanted
	 * index (for reuse), find the insertion point, and check the new
	 * range for overlap with live entries.
	 */
	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
		return (ENOSPC);
	}

	/* Reuse the matching deleted entry if there was one. */
	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " added\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

/*
 * Handle the 'bootcode' verb: hand the supplied boot code to the scheme
 * after checking it fits the scheme's boot area.
 */
static int
g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_table *table;
	struct sbuf *sb;
	int error, sz;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	sz = table->gpt_scheme->gps_bootcodesz;
	if (sz == 0) {
		/* Scheme has no boot-code area. */
		error = ENODEV;
		goto fail;
	}
	if (gpp->gpp_codesize > sz) {
		error = EFBIG;
		goto fail;
	}

	error = G_PART_BOOTCODE(table, gpp);
	if (error)
		goto fail;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s has bootcode\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

 fail:
	gctl_error(req, "%d", error);
	return (error);
}

/*
 * Handle the 'commit' verb: scrub the head/tail scrub-map sectors,
 * write the table to disk (or wither the geom if the table is the null
 * scheme), and retire created/deleted markers on the entries.
 */
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	g_topology_unlock();

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		/* Zero the sectors flagged in the scrub bitmaps. */
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	if (table->gpt_scheme == &g_part_null_scheme) {
		/* Committing a destroy: tear the geom down. */
		g_topology_lock();
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;

	g_topology_lock();
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

/*
 * Handle the 'create' verb: instantiate a new partition table of the
 * requested scheme on a provider, replacing a null-scheme table if one
 * is present.
 */
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	error = g_part_parm_geom(pp->name, &gp);
	if (!error) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	     gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		/* Inherit state from the null table being replaced. */
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		/* Restore the null table we were replacing. */
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}

/*
 * Handle the 'delete' verb: mark an entry deleted (or free it outright
 * if it was never committed) and wither its provider.  Fails with EBUSY
 * if the partition is open.
 */
static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	pp = entry->gpe_pp;
	if (pp != NULL) {
		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}

		pp->private = NULL;
		entry->gpe_pp = NULL;
	}

	if (entry->gpe_created) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}

	if (pp != NULL)
		g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " deleted\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

/*
 * Handle the 'destroy' verb: refuse if any live entries remain, then
 * replace the table with a null-scheme table (destruction becomes final
 * at commit time).
 */
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);
	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

/*
 * Handle the 'modify' verb: let the scheme change an entry's type
 * and/or label and flag the entry modified.
 */
static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " modified\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

/* The 'move' verb is not implemented. */
static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}

/* The 'recover' verb is not implemented. */
static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'recover'", ENOSYS);
	return (ENOSYS);
}

/* The 'resize' verb is not implemented. */
static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'resize'", ENOSYS);
	return (ENOSYS);
}

/*
 * Common handler for the 'set' and 'unset' verbs: toggle a
 * scheme-defined attribute on an entry ('set' != 0 sets it).
 */
static int
g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
    unsigned int set)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
	if (error) {
		gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
		return (error);
	}

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_printf(sb, " has %s %sset\n", gpp->gpp_attrib,
		    (set) ? "" : "un");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

/*
 * Handle the 'undo' verb: discard all uncommitted changes, re-probe or
 * re-read the on-disk table, and recreate providers from it.
 */
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			/* Never committed: drop provider and entry. */
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		if (!LIST_EMPTY(&table->gpt_entry)) {
			error = EBUSY;
			goto fail;
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

/*
 * Free the table and its entries (if any) attached to the geom, then
 * wither the geom itself.
 */
static void
g_part_wither(struct g_geom *gp, int error)
{
	struct g_part_entry *entry;
	struct g_part_table *table;

	table = gp->softc;
	if (table != NULL) {
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		if (gp->softc != NULL) {
			kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = NULL;
		}
	}
	g_wither_geom(gp, error);
}

/*
 * Class methods.
 */
1116 */ 1117 1118static void 1119g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb) 1120{ 1121 struct g_part_parms gpp; 1122 struct g_part_table *table; 1123 struct gctl_req_arg *ap; 1124 const char *p; 1125 enum g_part_ctl ctlreq; 1126 unsigned int i, mparms, oparms, parm; 1127 int auto_commit, close_on_error; 1128 int error, len, modifies; 1129 1130 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb)); 1131 g_topology_assert(); 1132 1133 ctlreq = G_PART_CTL_NONE; 1134 modifies = 1; 1135 mparms = 0; 1136 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION; 1137 switch (*verb) { 1138 case 'a': 1139 if (!strcmp(verb, "add")) { 1140 ctlreq = G_PART_CTL_ADD; 1141 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE | 1142 G_PART_PARM_START | G_PART_PARM_TYPE; 1143 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL; 1144 } 1145 break; 1146 case 'b': 1147 if (!strcmp(verb, "bootcode")) { 1148 ctlreq = G_PART_CTL_BOOTCODE; 1149 mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE; 1150 } 1151 break; 1152 case 'c': 1153 if (!strcmp(verb, "commit")) { 1154 ctlreq = G_PART_CTL_COMMIT; 1155 mparms |= G_PART_PARM_GEOM; 1156 modifies = 0; 1157 } else if (!strcmp(verb, "create")) { 1158 ctlreq = G_PART_CTL_CREATE; 1159 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME; 1160 oparms |= G_PART_PARM_ENTRIES; 1161 } 1162 break; 1163 case 'd': 1164 if (!strcmp(verb, "delete")) { 1165 ctlreq = G_PART_CTL_DELETE; 1166 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1167 } else if (!strcmp(verb, "destroy")) { 1168 ctlreq = G_PART_CTL_DESTROY; 1169 mparms |= G_PART_PARM_GEOM; 1170 } 1171 break; 1172 case 'm': 1173 if (!strcmp(verb, "modify")) { 1174 ctlreq = G_PART_CTL_MODIFY; 1175 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1176 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE; 1177 } else if (!strcmp(verb, "move")) { 1178 ctlreq = G_PART_CTL_MOVE; 1179 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1180 } 1181 break; 1182 case 'r': 1183 
if (!strcmp(verb, "recover")) { 1184 ctlreq = G_PART_CTL_RECOVER; 1185 mparms |= G_PART_PARM_GEOM; 1186 } else if (!strcmp(verb, "resize")) { 1187 ctlreq = G_PART_CTL_RESIZE; 1188 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1189 } 1190 break; 1191 case 's': 1192 if (!strcmp(verb, "set")) { 1193 ctlreq = G_PART_CTL_SET; 1194 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM | 1195 G_PART_PARM_INDEX; 1196 } 1197 break; 1198 case 'u': 1199 if (!strcmp(verb, "undo")) { 1200 ctlreq = G_PART_CTL_UNDO; 1201 mparms |= G_PART_PARM_GEOM; 1202 modifies = 0; 1203 } else if (!strcmp(verb, "unset")) { 1204 ctlreq = G_PART_CTL_UNSET; 1205 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM | 1206 G_PART_PARM_INDEX; 1207 } 1208 break; 1209 } 1210 if (ctlreq == G_PART_CTL_NONE) { 1211 gctl_error(req, "%d verb '%s'", EINVAL, verb); 1212 return; 1213 } 1214 1215 bzero(&gpp, sizeof(gpp)); 1216 for (i = 0; i < req->narg; i++) { 1217 ap = &req->arg[i]; 1218 parm = 0; 1219 switch (ap->name[0]) { 1220 case 'a': 1221 if (!strcmp(ap->name, "attrib")) 1222 parm = G_PART_PARM_ATTRIB; 1223 break; 1224 case 'b': 1225 if (!strcmp(ap->name, "bootcode")) 1226 parm = G_PART_PARM_BOOTCODE; 1227 break; 1228 case 'c': 1229 if (!strcmp(ap->name, "class")) 1230 continue; 1231 break; 1232 case 'e': 1233 if (!strcmp(ap->name, "entries")) 1234 parm = G_PART_PARM_ENTRIES; 1235 break; 1236 case 'f': 1237 if (!strcmp(ap->name, "flags")) 1238 parm = G_PART_PARM_FLAGS; 1239 break; 1240 case 'g': 1241 if (!strcmp(ap->name, "geom")) 1242 parm = G_PART_PARM_GEOM; 1243 break; 1244 case 'i': 1245 if (!strcmp(ap->name, "index")) 1246 parm = G_PART_PARM_INDEX; 1247 break; 1248 case 'l': 1249 if (!strcmp(ap->name, "label")) 1250 parm = G_PART_PARM_LABEL; 1251 break; 1252 case 'o': 1253 if (!strcmp(ap->name, "output")) 1254 parm = G_PART_PARM_OUTPUT; 1255 break; 1256 case 'p': 1257 if (!strcmp(ap->name, "provider")) 1258 parm = G_PART_PARM_PROVIDER; 1259 break; 1260 case 's': 1261 if (!strcmp(ap->name, "scheme")) 1262 
parm = G_PART_PARM_SCHEME; 1263 else if (!strcmp(ap->name, "size")) 1264 parm = G_PART_PARM_SIZE; 1265 else if (!strcmp(ap->name, "start")) 1266 parm = G_PART_PARM_START; 1267 break; 1268 case 't': 1269 if (!strcmp(ap->name, "type")) 1270 parm = G_PART_PARM_TYPE; 1271 break; 1272 case 'v': 1273 if (!strcmp(ap->name, "verb")) 1274 continue; 1275 else if (!strcmp(ap->name, "version")) 1276 parm = G_PART_PARM_VERSION; 1277 break; 1278 } 1279 if ((parm & (mparms | oparms)) == 0) { 1280 gctl_error(req, "%d param '%s'", EINVAL, ap->name); 1281 return; 1282 } 1283 if (parm == G_PART_PARM_BOOTCODE) 1284 p = gctl_get_param(req, ap->name, &len); 1285 else 1286 p = gctl_get_asciiparam(req, ap->name); 1287 if (p == NULL) { 1288 gctl_error(req, "%d param '%s'", ENOATTR, ap->name); 1289 return; 1290 } 1291 switch (parm) { 1292 case G_PART_PARM_ATTRIB: 1293 error = g_part_parm_str(p, &gpp.gpp_attrib); 1294 break; 1295 case G_PART_PARM_BOOTCODE: 1296 gpp.gpp_codeptr = p; 1297 gpp.gpp_codesize = len; 1298 error = 0; 1299 break; 1300 case G_PART_PARM_ENTRIES: 1301 error = g_part_parm_uint(p, &gpp.gpp_entries); 1302 break; 1303 case G_PART_PARM_FLAGS: 1304 if (p[0] == '\0') 1305 continue; 1306 error = g_part_parm_str(p, &gpp.gpp_flags); 1307 break; 1308 case G_PART_PARM_GEOM: 1309 error = g_part_parm_geom(p, &gpp.gpp_geom); 1310 break; 1311 case G_PART_PARM_INDEX: 1312 error = g_part_parm_uint(p, &gpp.gpp_index); 1313 break; 1314 case G_PART_PARM_LABEL: 1315 /* An empty label is always valid. 
*/ 1316 gpp.gpp_label = p; 1317 error = 0; 1318 break; 1319 case G_PART_PARM_OUTPUT: 1320 error = 0; /* Write-only parameter */ 1321 break; 1322 case G_PART_PARM_PROVIDER: 1323 error = g_part_parm_provider(p, &gpp.gpp_provider); 1324 break; 1325 case G_PART_PARM_SCHEME: 1326 error = g_part_parm_scheme(p, &gpp.gpp_scheme); 1327 break; 1328 case G_PART_PARM_SIZE: 1329 error = g_part_parm_quad(p, &gpp.gpp_size); 1330 break; 1331 case G_PART_PARM_START: 1332 error = g_part_parm_quad(p, &gpp.gpp_start); 1333 break; 1334 case G_PART_PARM_TYPE: 1335 error = g_part_parm_str(p, &gpp.gpp_type); 1336 break; 1337 case G_PART_PARM_VERSION: 1338 error = g_part_parm_uint(p, &gpp.gpp_version); 1339 break; 1340 default: 1341 error = EDOOFUS; 1342 break; 1343 } 1344 if (error) { 1345 gctl_error(req, "%d %s '%s'", error, ap->name, p); 1346 return; 1347 } 1348 gpp.gpp_parms |= parm; 1349 } 1350 if ((gpp.gpp_parms & mparms) != mparms) { 1351 parm = mparms - (gpp.gpp_parms & mparms); 1352 gctl_error(req, "%d param '%x'", ENOATTR, parm); 1353 return; 1354 } 1355 1356 /* Obtain permissions if possible/necessary. */ 1357 close_on_error = 0; 1358 table = NULL; 1359 if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) { 1360 table = gpp.gpp_geom->softc; 1361 if (table != NULL && !table->gpt_opened) { 1362 error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer), 1363 1, 1, 1); 1364 if (error) { 1365 gctl_error(req, "%d geom '%s'", error, 1366 gpp.gpp_geom->name); 1367 return; 1368 } 1369 table->gpt_opened = 1; 1370 close_on_error = 1; 1371 } 1372 } 1373 1374 /* Allow the scheme to check or modify the parameters. */ 1375 if (table != NULL) { 1376 error = G_PART_PRECHECK(table, ctlreq, &gpp); 1377 if (error) { 1378 gctl_error(req, "%d pre-check failed", error); 1379 goto out; 1380 } 1381 } else 1382 error = EDOOFUS; /* Prevent bogus uninit. warning. 
*/ 1383 1384 switch (ctlreq) { 1385 case G_PART_CTL_NONE: 1386 panic("%s", __func__); 1387 case G_PART_CTL_ADD: 1388 error = g_part_ctl_add(req, &gpp); 1389 break; 1390 case G_PART_CTL_BOOTCODE: 1391 error = g_part_ctl_bootcode(req, &gpp); 1392 break; 1393 case G_PART_CTL_COMMIT: 1394 error = g_part_ctl_commit(req, &gpp); 1395 break; 1396 case G_PART_CTL_CREATE: 1397 error = g_part_ctl_create(req, &gpp); 1398 break; 1399 case G_PART_CTL_DELETE: 1400 error = g_part_ctl_delete(req, &gpp); 1401 break; 1402 case G_PART_CTL_DESTROY: 1403 error = g_part_ctl_destroy(req, &gpp); 1404 break; 1405 case G_PART_CTL_MODIFY: 1406 error = g_part_ctl_modify(req, &gpp); 1407 break; 1408 case G_PART_CTL_MOVE: 1409 error = g_part_ctl_move(req, &gpp); 1410 break; 1411 case G_PART_CTL_RECOVER: 1412 error = g_part_ctl_recover(req, &gpp); 1413 break; 1414 case G_PART_CTL_RESIZE: 1415 error = g_part_ctl_resize(req, &gpp); 1416 break; 1417 case G_PART_CTL_SET: 1418 error = g_part_ctl_setunset(req, &gpp, 1); 1419 break; 1420 case G_PART_CTL_UNDO: 1421 error = g_part_ctl_undo(req, &gpp); 1422 break; 1423 case G_PART_CTL_UNSET: 1424 error = g_part_ctl_setunset(req, &gpp, 0); 1425 break; 1426 } 1427 1428 /* Implement automatic commit. */ 1429 if (!error) { 1430 auto_commit = (modifies && 1431 (gpp.gpp_parms & G_PART_PARM_FLAGS) && 1432 strchr(gpp.gpp_flags, 'C') != NULL) ? 
1 : 0; 1433 if (auto_commit) { 1434 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, (__func__)); 1435 error = g_part_ctl_commit(req, &gpp); 1436 } 1437 } 1438 1439 out: 1440 if (error && close_on_error) { 1441 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1); 1442 table->gpt_opened = 0; 1443 } 1444} 1445 1446static int 1447g_part_destroy_geom(struct gctl_req *req, struct g_class *mp, 1448 struct g_geom *gp) 1449{ 1450 1451 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name)); 1452 g_topology_assert(); 1453 1454 g_part_wither(gp, EINVAL); 1455 return (0); 1456} 1457 1458static struct g_geom * 1459g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 1460{ 1461 struct g_consumer *cp; 1462 struct g_geom *gp; 1463 struct g_part_entry *entry; 1464 struct g_part_table *table; 1465 struct root_hold_token *rht; 1466 int attr, depth; 1467 int error; 1468 1469 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name)); 1470 g_topology_assert(); 1471 1472 /* Skip providers that are already open for writing. */ 1473 if (pp->acw > 0) 1474 return (NULL); 1475 1476 /* 1477 * Create a GEOM with consumer and hook it up to the provider. 1478 * With that we become part of the topology. Optain read access 1479 * to the provider. 1480 */ 1481 gp = g_new_geomf(mp, "%s", pp->name); 1482 cp = g_new_consumer(gp); 1483 error = g_attach(cp, pp); 1484 if (error == 0) 1485 error = g_access(cp, 1, 0, 0); 1486 if (error != 0) { 1487 g_part_wither(gp, error); 1488 return (NULL); 1489 } 1490 1491 rht = root_mount_hold(mp->name); 1492 g_topology_unlock(); 1493 1494 /* 1495 * Short-circuit the whole probing galore when there's no 1496 * media present. 1497 */ 1498 if (pp->mediasize == 0 || pp->sectorsize == 0) { 1499 error = ENODEV; 1500 goto fail; 1501 } 1502 1503 /* Make sure we can nest and if so, determine our depth. 
*/ 1504 error = g_getattr("PART::isleaf", cp, &attr); 1505 if (!error && attr) { 1506 error = ENODEV; 1507 goto fail; 1508 } 1509 error = g_getattr("PART::depth", cp, &attr); 1510 depth = (!error) ? attr + 1 : 0; 1511 1512 error = g_part_probe(gp, cp, depth); 1513 if (error) 1514 goto fail; 1515 1516 table = gp->softc; 1517 1518 /* 1519 * Synthesize a disk geometry. Some partitioning schemes 1520 * depend on it and since some file systems need it even 1521 * when the partitition scheme doesn't, we do it here in 1522 * scheme-independent code. 1523 */ 1524 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1525 1526 error = G_PART_READ(table, cp); 1527 if (error) 1528 goto fail; 1529 1530 g_topology_lock(); 1531 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1532 if (!entry->gpe_internal) 1533 g_part_new_provider(gp, table, entry); 1534 } 1535 1536 root_mount_rel(rht); 1537 g_access(cp, -1, 0, 0); 1538 return (gp); 1539 1540 fail: 1541 g_topology_lock(); 1542 root_mount_rel(rht); 1543 g_access(cp, -1, 0, 0); 1544 g_part_wither(gp, error); 1545 return (NULL); 1546} 1547 1548/* 1549 * Geom methods. 1550 */ 1551 1552static int 1553g_part_access(struct g_provider *pp, int dr, int dw, int de) 1554{ 1555 struct g_consumer *cp; 1556 1557 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr, 1558 dw, de)); 1559 1560 cp = LIST_FIRST(&pp->geom->consumer); 1561 1562 /* We always gain write-exclusive access. 
*/ 1563 return (g_access(cp, dr, dw, dw + de)); 1564} 1565 1566static void 1567g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 1568 struct g_consumer *cp, struct g_provider *pp) 1569{ 1570 char buf[64]; 1571 struct g_part_entry *entry; 1572 struct g_part_table *table; 1573 1574 KASSERT(sb != NULL && gp != NULL, (__func__)); 1575 table = gp->softc; 1576 1577 if (indent == NULL) { 1578 KASSERT(cp == NULL && pp != NULL, (__func__)); 1579 entry = pp->private; 1580 if (entry == NULL) 1581 return; 1582 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index, 1583 (uintmax_t)entry->gpe_offset, 1584 G_PART_TYPE(table, entry, buf, sizeof(buf))); 1585 /* 1586 * libdisk compatibility quirk - the scheme dumps the 1587 * slicer name and partition type in a way that is 1588 * compatible with libdisk. When libdisk is not used 1589 * anymore, this should go away. 1590 */ 1591 G_PART_DUMPCONF(table, entry, sb, indent); 1592 } else if (cp != NULL) { /* Consumer configuration. */ 1593 KASSERT(pp == NULL, (__func__)); 1594 /* none */ 1595 } else if (pp != NULL) { /* Provider configuration. */ 1596 entry = pp->private; 1597 if (entry == NULL) 1598 return; 1599 sbuf_printf(sb, "%s<start>%ju</start>\n", indent, 1600 (uintmax_t)entry->gpe_start); 1601 sbuf_printf(sb, "%s<end>%ju</end>\n", indent, 1602 (uintmax_t)entry->gpe_end); 1603 sbuf_printf(sb, "%s<index>%u</index>\n", indent, 1604 entry->gpe_index); 1605 sbuf_printf(sb, "%s<type>%s</type>\n", indent, 1606 G_PART_TYPE(table, entry, buf, sizeof(buf))); 1607 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent, 1608 (uintmax_t)entry->gpe_offset); 1609 sbuf_printf(sb, "%s<length>%ju</length>\n", indent, 1610 (uintmax_t)pp->mediasize); 1611 G_PART_DUMPCONF(table, entry, sb, indent); 1612 } else { /* Geom configuration. 
*/ 1613 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent, 1614 table->gpt_scheme->name); 1615 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent, 1616 table->gpt_entries); 1617 sbuf_printf(sb, "%s<first>%ju</first>\n", indent, 1618 (uintmax_t)table->gpt_first); 1619 sbuf_printf(sb, "%s<last>%ju</last>\n", indent, 1620 (uintmax_t)table->gpt_last); 1621 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent, 1622 table->gpt_sectors); 1623 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent, 1624 table->gpt_heads); 1625 G_PART_DUMPCONF(table, NULL, sb, indent); 1626 } 1627} 1628 1629static void 1630g_part_orphan(struct g_consumer *cp) 1631{ 1632 struct g_provider *pp; 1633 1634 pp = cp->provider; 1635 KASSERT(pp != NULL, (__func__)); 1636 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 1637 g_topology_assert(); 1638 1639 KASSERT(pp->error != 0, (__func__)); 1640 g_part_wither(cp->geom, pp->error); 1641} 1642 1643static void 1644g_part_spoiled(struct g_consumer *cp) 1645{ 1646 1647 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 1648 g_topology_assert(); 1649 1650 g_part_wither(cp->geom, ENXIO); 1651} 1652 1653static void 1654g_part_start(struct bio *bp) 1655{ 1656 struct bio *bp2; 1657 struct g_consumer *cp; 1658 struct g_geom *gp; 1659 struct g_part_entry *entry; 1660 struct g_part_table *table; 1661 struct g_kerneldump *gkd; 1662 struct g_provider *pp; 1663 1664 pp = bp->bio_to; 1665 gp = pp->geom; 1666 table = gp->softc; 1667 cp = LIST_FIRST(&gp->consumer); 1668 1669 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd, 1670 pp->name)); 1671 1672 entry = pp->private; 1673 if (entry == NULL) { 1674 g_io_deliver(bp, ENXIO); 1675 return; 1676 } 1677 1678 switch(bp->bio_cmd) { 1679 case BIO_DELETE: 1680 case BIO_READ: 1681 case BIO_WRITE: 1682 if (bp->bio_offset >= pp->mediasize) { 1683 g_io_deliver(bp, EIO); 1684 return; 1685 } 1686 bp2 = g_clone_bio(bp); 1687 if (bp2 == NULL) { 1688 g_io_deliver(bp, ENOMEM); 
1689 return; 1690 } 1691 if (bp2->bio_offset + bp2->bio_length > pp->mediasize) 1692 bp2->bio_length = pp->mediasize - bp2->bio_offset; 1693 bp2->bio_done = g_std_done; 1694 bp2->bio_offset += entry->gpe_offset; 1695 g_io_request(bp2, cp); 1696 return; 1697 case BIO_FLUSH: 1698 break; 1699 case BIO_GETATTR: 1700 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads)) 1701 return; 1702 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors)) 1703 return; 1704 if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf)) 1705 return; 1706 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth)) 1707 return; 1708 if (g_handleattr_str(bp, "PART::scheme", 1709 table->gpt_scheme->name)) 1710 return; 1711 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) { 1712 /* 1713 * Check that the partition is suitable for kernel 1714 * dumps. Typically only swap partitions should be 1715 * used. 1716 */ 1717 if (!G_PART_DUMPTO(table, entry)) { 1718 g_io_deliver(bp, ENODEV); 1719 printf("GEOM_PART: Partition '%s' not suitable" 1720 " for kernel dumps (wrong type?)\n", 1721 pp->name); 1722 return; 1723 } 1724 gkd = (struct g_kerneldump *)bp->bio_data; 1725 if (gkd->offset >= pp->mediasize) { 1726 g_io_deliver(bp, EIO); 1727 return; 1728 } 1729 if (gkd->offset + gkd->length > pp->mediasize) 1730 gkd->length = pp->mediasize - gkd->offset; 1731 gkd->offset += entry->gpe_offset; 1732 } 1733 break; 1734 default: 1735 g_io_deliver(bp, EOPNOTSUPP); 1736 return; 1737 } 1738 1739 bp2 = g_clone_bio(bp); 1740 if (bp2 == NULL) { 1741 g_io_deliver(bp, ENOMEM); 1742 return; 1743 } 1744 bp2->bio_done = g_std_done; 1745 g_io_request(bp2, cp); 1746} 1747 1748static void 1749g_part_init(struct g_class *mp) 1750{ 1751 1752 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list); 1753} 1754 1755static void 1756g_part_fini(struct g_class *mp) 1757{ 1758 1759 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list); 1760} 1761 1762static void 1763g_part_unload_event(void 
*arg, int flag) 1764{ 1765 struct g_consumer *cp; 1766 struct g_geom *gp; 1767 struct g_provider *pp; 1768 struct g_part_scheme *scheme; 1769 struct g_part_table *table; 1770 uintptr_t *xchg; 1771 int acc, error; 1772 1773 if (flag == EV_CANCEL) 1774 return; 1775 1776 xchg = arg; 1777 error = 0; 1778 scheme = (void *)(*xchg); 1779 1780 g_topology_assert(); 1781 1782 LIST_FOREACH(gp, &g_part_class.geom, geom) { 1783 table = gp->softc; 1784 if (table->gpt_scheme != scheme) 1785 continue; 1786 1787 acc = 0; 1788 LIST_FOREACH(pp, &gp->provider, provider) 1789 acc += pp->acr + pp->acw + pp->ace; 1790 LIST_FOREACH(cp, &gp->consumer, consumer) 1791 acc += cp->acr + cp->acw + cp->ace; 1792 1793 if (!acc) 1794 g_part_wither(gp, ENOSYS); 1795 else 1796 error = EBUSY; 1797 } 1798 1799 if (!error) 1800 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list); 1801 1802 *xchg = error; 1803} 1804 1805int 1806g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme) 1807{ 1808 uintptr_t arg; 1809 int error; 1810 1811 switch (type) { 1812 case MOD_LOAD: 1813 TAILQ_INSERT_TAIL(&g_part_schemes, scheme, scheme_list); 1814 1815 error = g_retaste(&g_part_class); 1816 if (error) 1817 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list); 1818 break; 1819 case MOD_UNLOAD: 1820 arg = (uintptr_t)scheme; 1821 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK, 1822 NULL); 1823 if (!error) 1824 error = (arg == (uintptr_t)scheme) ? EDOOFUS : arg; 1825 break; 1826 default: 1827 error = EOPNOTSUPP; 1828 break; 1829 } 1830 1831 return (error); 1832} 1833