g_part.c revision 221984
1/*- 2 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: head/sys/geom/part/g_part.c 221984 2011-05-16 12:00:32Z ae $"); 29 30#include <sys/param.h> 31#include <sys/bio.h> 32#include <sys/diskmbr.h> 33#include <sys/endian.h> 34#include <sys/kernel.h> 35#include <sys/kobj.h> 36#include <sys/limits.h> 37#include <sys/lock.h> 38#include <sys/malloc.h> 39#include <sys/mutex.h> 40#include <sys/queue.h> 41#include <sys/sbuf.h> 42#include <sys/sysctl.h> 43#include <sys/systm.h> 44#include <sys/uuid.h> 45#include <geom/geom.h> 46#include <geom/geom_ctl.h> 47#include <geom/geom_int.h> 48#include <geom/part/g_part.h> 49 50#include "g_part_if.h" 51 52#ifndef _PATH_DEV 53#define _PATH_DEV "/dev/" 54#endif 55 56static kobj_method_t g_part_null_methods[] = { 57 { 0, 0 } 58}; 59 60static struct g_part_scheme g_part_null_scheme = { 61 "(none)", 62 g_part_null_methods, 63 sizeof(struct g_part_table), 64}; 65 66TAILQ_HEAD(, g_part_scheme) g_part_schemes = 67 TAILQ_HEAD_INITIALIZER(g_part_schemes); 68 69struct g_part_alias_list { 70 const char *lexeme; 71 enum g_part_alias alias; 72} g_part_alias_list[G_PART_ALIAS_COUNT] = { 73 { "apple-boot", G_PART_ALIAS_APPLE_BOOT }, 74 { "apple-hfs", G_PART_ALIAS_APPLE_HFS }, 75 { "apple-label", G_PART_ALIAS_APPLE_LABEL }, 76 { "apple-raid", G_PART_ALIAS_APPLE_RAID }, 77 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE }, 78 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY }, 79 { "apple-ufs", G_PART_ALIAS_APPLE_UFS }, 80 { "bios-boot", G_PART_ALIAS_BIOS_BOOT }, 81 { "ebr", G_PART_ALIAS_EBR }, 82 { "efi", G_PART_ALIAS_EFI }, 83 { "fat32", G_PART_ALIAS_MS_FAT32 }, 84 { "freebsd", G_PART_ALIAS_FREEBSD }, 85 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT }, 86 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP }, 87 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS }, 88 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM }, 89 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS }, 90 { "linux-data", G_PART_ALIAS_LINUX_DATA }, 91 { "linux-lvm", G_PART_ALIAS_LINUX_LVM }, 92 { "linux-raid", G_PART_ALIAS_LINUX_RAID }, 93 { "linux-swap", G_PART_ALIAS_LINUX_SWAP }, 94 { "mbr", G_PART_ALIAS_MBR }, 95 { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA }, 96 { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA }, 97 { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA }, 98 { "ms-reserved", G_PART_ALIAS_MS_RESERVED }, 99 { "ntfs", G_PART_ALIAS_MS_NTFS }, 100 { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD }, 101 { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD }, 102 { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS }, 103 { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS }, 104 { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID }, 105 { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP }, 106}; 107 108SYSCTL_DECL(_kern_geom); 109SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0, "GEOM_PART stuff"); 110static u_int check_integrity = 1; 111TUNABLE_INT("kern.geom.part.check_integrity", &check_integrity); 112SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity, CTLFLAG_RW, 113 &check_integrity, 1, "Enable integrity checking"); 114 115/* 116 * The GEOM partitioning class. 117 */ 118static g_ctl_req_t g_part_ctlreq; 119static g_ctl_destroy_geom_t g_part_destroy_geom; 120static g_fini_t g_part_fini; 121static g_init_t g_part_init; 122static g_taste_t g_part_taste; 123 124static g_access_t g_part_access; 125static g_dumpconf_t g_part_dumpconf; 126static g_orphan_t g_part_orphan; 127static g_spoiled_t g_part_spoiled; 128static g_start_t g_part_start; 129 130static struct g_class g_part_class = { 131 .name = "PART", 132 .version = G_VERSION, 133 /* Class methods. */ 134 .ctlreq = g_part_ctlreq, 135 .destroy_geom = g_part_destroy_geom, 136 .fini = g_part_fini, 137 .init = g_part_init, 138 .taste = g_part_taste, 139 /* Geom methods. */ 140 .access = g_part_access, 141 .dumpconf = g_part_dumpconf, 142 .orphan = g_part_orphan, 143 .spoiled = g_part_spoiled, 144 .start = g_part_start, 145}; 146 147DECLARE_GEOM_CLASS(g_part_class, g_part); 148 149/* 150 * Support functions. 151 */ 152 153static void g_part_wither(struct g_geom *, int); 154 155const char * 156g_part_alias_name(enum g_part_alias alias) 157{ 158 int i; 159 160 for (i = 0; i < G_PART_ALIAS_COUNT; i++) { 161 if (g_part_alias_list[i].alias != alias) 162 continue; 163 return (g_part_alias_list[i].lexeme); 164 } 165 166 return (NULL); 167} 168 169void 170g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs, 171 u_int *bestheads) 172{ 173 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 }; 174 off_t chs, cylinders; 175 u_int heads; 176 int idx; 177 178 *bestchs = 0; 179 *bestheads = 0; 180 for (idx = 0; candidate_heads[idx] != 0; idx++) { 181 heads = candidate_heads[idx]; 182 cylinders = blocks / heads / sectors; 183 if (cylinders < heads || cylinders < sectors) 184 break; 185 if (cylinders > 1023) 186 continue; 187 chs = cylinders * heads * sectors; 188 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) { 189 *bestchs = chs; 190 *bestheads = heads; 191 } 192 } 193} 194 195static void 196g_part_geometry(struct g_part_table *table, struct g_consumer *cp, 197 off_t blocks) 198{ 199 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 }; 200 off_t chs, bestchs; 201 u_int heads, sectors; 202 int idx; 203 204 if (g_getattr("GEOM::fwsectors", cp, §ors) != 0 || sectors == 0 || 205 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) { 206 table->gpt_fixgeom = 0; 207 table->gpt_heads = 0; 208 table->gpt_sectors = 0; 209 bestchs = 0; 210 for (idx = 0; candidate_sectors[idx] != 0; idx++) { 211 sectors = candidate_sectors[idx]; 212 g_part_geometry_heads(blocks, sectors, &chs, &heads); 213 if (chs == 0) 214 continue; 215 /* 216 * Prefer a geometry with sectors > 1, but only if 217 * it doesn't bump down the numbver of heads to 1. 218 */ 219 if (chs > bestchs || (chs == bestchs && heads > 1 && 220 table->gpt_sectors == 1)) { 221 bestchs = chs; 222 table->gpt_heads = heads; 223 table->gpt_sectors = sectors; 224 } 225 } 226 /* 227 * If we didn't find a geometry at all, then the disk is 228 * too big. This means we can use the maximum number of 229 * heads and sectors. 230 */ 231 if (bestchs == 0) { 232 table->gpt_heads = 255; 233 table->gpt_sectors = 63; 234 } 235 } else { 236 table->gpt_fixgeom = 1; 237 table->gpt_heads = heads; 238 table->gpt_sectors = sectors; 239 } 240} 241 242static int 243g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp) 244{ 245 struct g_part_entry *e1, *e2; 246 struct g_provider *pp; 247 248 e1 = e2 = NULL; 249 pp = cp->provider; 250 if (table->gpt_first > table->gpt_last || 251 table->gpt_last > pp->mediasize / pp->sectorsize - 1) 252 goto fail; 253 254 LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) { 255 if (e1->gpe_deleted || e1->gpe_internal) 256 continue; 257 if (e1->gpe_start < table->gpt_first || 258 e1->gpe_start > table->gpt_last || 259 e1->gpe_end < e1->gpe_start || 260 e1->gpe_end > table->gpt_last) 261 goto fail; 262 e2 = e1; 263 while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) { 264 if (e2->gpe_deleted || e2->gpe_internal) 265 continue; 266 if (e1->gpe_start >= e2->gpe_start && 267 e1->gpe_start <= e2->gpe_end) 268 goto fail; 269 if (e1->gpe_end >= e2->gpe_start && 270 e1->gpe_end <= e2->gpe_end) 271 goto fail; 272 if (e1->gpe_start < e2->gpe_start && 273 e1->gpe_end > e2->gpe_end) 274 goto fail; 275 } 276 } 277 return (0); 278fail: 279 printf("GEOM_PART: integrity check failed (%s, %s)\n", pp->name, 280 table->gpt_scheme->name); 281 if (bootverbose) { 282 if (e1 == NULL) 283 printf("GEOM_PART: invalid geom configuration:\n"); 284 else if (e2 == NULL) 285 printf("GEOM_PART: invalid partition entry:\n"); 286 else 287 printf("GEOM_PART: overlapped partition entries:\n"); 288 if (e1 != NULL) 289 printf("GEOM_PART: index: %d, start: %jd, end: %jd\n", 290 e1->gpe_index, 291 (intmax_t)e1->gpe_start, (intmax_t)e1->gpe_end); 292 if (e2 != NULL) 293 printf("GEOM_PART: index: %d, start: %jd, end: %jd\n", 294 e2->gpe_index, 295 (intmax_t)e2->gpe_start, (intmax_t)e2->gpe_end); 296 printf("GEOM_PART: first: %jd, last: %jd, sectors: %jd\n", 297 (intmax_t)table->gpt_first, (intmax_t)table->gpt_last, 298 (intmax_t)pp->mediasize / pp->sectorsize - 1); 299 } 300 if (check_integrity == 0) { 301 table->gpt_corrupt = 1; 302 return (0); 303 } 304 return (EINVAL); 305} 306 307struct g_part_entry * 308g_part_new_entry(struct g_part_table *table, int index, quad_t start, 309 quad_t end) 310{ 311 struct g_part_entry *entry, *last; 312 313 last = NULL; 314 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 315 if (entry->gpe_index == index) 316 break; 317 if (entry->gpe_index > index) { 318 entry = NULL; 319 break; 320 } 321 last = entry; 322 } 323 if (entry == NULL) { 324 entry = g_malloc(table->gpt_scheme->gps_entrysz, 325 M_WAITOK | M_ZERO); 326 entry->gpe_index = index; 327 if (last == NULL) 328 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 329 else 330 LIST_INSERT_AFTER(last, entry, gpe_entry); 331 } else 332 entry->gpe_offset = 0; 333 entry->gpe_start = start; 334 entry->gpe_end = end; 335 return (entry); 336} 337 338static void 339g_part_new_provider(struct g_geom *gp, struct g_part_table *table, 340 struct g_part_entry *entry) 341{ 342 struct g_consumer *cp; 343 struct g_provider *pp; 344 struct sbuf *sb; 345 off_t offset; 346 347 cp = LIST_FIRST(&gp->consumer); 348 pp = cp->provider; 349 350 offset = entry->gpe_start * pp->sectorsize; 351 if (entry->gpe_offset < offset) 352 entry->gpe_offset = offset; 353 354 if (entry->gpe_pp == NULL) { 355 sb = sbuf_new_auto(); 356 G_PART_FULLNAME(table, entry, sb, gp->name); 357 sbuf_finish(sb); 358 entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb)); 359 sbuf_delete(sb); 360 entry->gpe_pp->private = entry; /* Close the circle. */ 361 } 362 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */ 363 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) * 364 pp->sectorsize; 365 entry->gpe_pp->mediasize -= entry->gpe_offset - offset; 366 entry->gpe_pp->sectorsize = pp->sectorsize; 367 entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE; 368 entry->gpe_pp->stripesize = pp->stripesize; 369 entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset; 370 if (pp->stripesize > 0) 371 entry->gpe_pp->stripeoffset %= pp->stripesize; 372 g_error_provider(entry->gpe_pp, 0); 373} 374 375static struct g_geom* 376g_part_find_geom(const char *name) 377{ 378 struct g_geom *gp; 379 LIST_FOREACH(gp, &g_part_class.geom, geom) { 380 if (!strcmp(name, gp->name)) 381 break; 382 } 383 return (gp); 384} 385 386static int 387g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v) 388{ 389 struct g_geom *gp; 390 const char *gname; 391 392 gname = gctl_get_asciiparam(req, name); 393 if (gname == NULL) 394 return (ENOATTR); 395 if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 396 gname += sizeof(_PATH_DEV) - 1; 397 gp = g_part_find_geom(gname); 398 if (gp == NULL) { 399 gctl_error(req, "%d %s '%s'", EINVAL, name, gname); 400 return (EINVAL); 401 } 402 *v = gp; 403 return (0); 404} 405 406static int 407g_part_parm_provider(struct gctl_req *req, const char *name, 408 struct g_provider **v) 409{ 410 struct g_provider *pp; 411 const char *pname; 412 413 pname = gctl_get_asciiparam(req, name); 414 if (pname == NULL) 415 return (ENOATTR); 416 if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 417 pname += sizeof(_PATH_DEV) - 1; 418 pp = g_provider_by_name(pname); 419 if (pp == NULL) { 420 gctl_error(req, "%d %s '%s'", EINVAL, name, pname); 421 return (EINVAL); 422 } 423 *v = pp; 424 return (0); 425} 426 427static int 428g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v) 429{ 430 const char *p; 431 char *x; 432 quad_t q; 433 434 p = gctl_get_asciiparam(req, name); 435 if (p == NULL) 436 return (ENOATTR); 437 q = strtoq(p, &x, 0); 438 if (*x != '\0' || q < 0) { 439 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 440 return (EINVAL); 441 } 442 *v = q; 443 return (0); 444} 445 446static int 447g_part_parm_scheme(struct gctl_req *req, const char *name, 448 struct g_part_scheme **v) 449{ 450 struct g_part_scheme *s; 451 const char *p; 452 453 p = gctl_get_asciiparam(req, name); 454 if (p == NULL) 455 return (ENOATTR); 456 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) { 457 if (s == &g_part_null_scheme) 458 continue; 459 if (!strcasecmp(s->name, p)) 460 break; 461 } 462 if (s == NULL) { 463 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 464 return (EINVAL); 465 } 466 *v = s; 467 return (0); 468} 469 470static int 471g_part_parm_str(struct gctl_req *req, const char *name, const char **v) 472{ 473 const char *p; 474 475 p = gctl_get_asciiparam(req, name); 476 if (p == NULL) 477 return (ENOATTR); 478 /* An empty label is always valid. */ 479 if (strcmp(name, "label") != 0 && p[0] == '\0') { 480 gctl_error(req, "%d %s '%s'", EINVAL, name, p); 481 return (EINVAL); 482 } 483 *v = p; 484 return (0); 485} 486 487static int 488g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v) 489{ 490 const intmax_t *p; 491 int size; 492 493 p = gctl_get_param(req, name, &size); 494 if (p == NULL) 495 return (ENOATTR); 496 if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) { 497 gctl_error(req, "%d %s '%jd'", EINVAL, name, *p); 498 return (EINVAL); 499 } 500 *v = (u_int)*p; 501 return (0); 502} 503 504static int 505g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v) 506{ 507 const uint32_t *p; 508 int size; 509 510 p = gctl_get_param(req, name, &size); 511 if (p == NULL) 512 return (ENOATTR); 513 if (size != sizeof(*p) || *p > INT_MAX) { 514 gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p); 515 return (EINVAL); 516 } 517 *v = (u_int)*p; 518 return (0); 519} 520 521static int 522g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v, 523 unsigned int *s) 524{ 525 const void *p; 526 int size; 527 528 p = gctl_get_param(req, name, &size); 529 if (p == NULL) 530 return (ENOATTR); 531 *v = p; 532 *s = size; 533 return (0); 534} 535 536static int 537g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth) 538{ 539 struct g_part_scheme *iter, *scheme; 540 struct g_part_table *table; 541 int pri, probe; 542 543 table = gp->softc; 544 scheme = (table != NULL) ? table->gpt_scheme : NULL; 545 pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN; 546 if (pri == 0) 547 goto done; 548 if (pri > 0) { /* error */ 549 scheme = NULL; 550 pri = INT_MIN; 551 } 552 553 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 554 if (iter == &g_part_null_scheme) 555 continue; 556 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM, 557 M_WAITOK); 558 table->gpt_gp = gp; 559 table->gpt_scheme = iter; 560 table->gpt_depth = depth; 561 probe = G_PART_PROBE(table, cp); 562 if (probe <= 0 && probe > pri) { 563 pri = probe; 564 scheme = iter; 565 if (gp->softc != NULL) 566 kobj_delete((kobj_t)gp->softc, M_GEOM); 567 gp->softc = table; 568 if (pri == 0) 569 goto done; 570 } else 571 kobj_delete((kobj_t)table, M_GEOM); 572 } 573 574done: 575 return ((scheme == NULL) ? ENXIO : 0); 576} 577 578/* 579 * Control request functions. 580 */ 581 582static int 583g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp) 584{ 585 struct g_geom *gp; 586 struct g_provider *pp; 587 struct g_part_entry *delent, *last, *entry; 588 struct g_part_table *table; 589 struct sbuf *sb; 590 quad_t end; 591 unsigned int index; 592 int error; 593 594 gp = gpp->gpp_geom; 595 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 596 g_topology_assert(); 597 598 pp = LIST_FIRST(&gp->consumer)->provider; 599 table = gp->softc; 600 end = gpp->gpp_start + gpp->gpp_size - 1; 601 602 if (gpp->gpp_start < table->gpt_first || 603 gpp->gpp_start > table->gpt_last) { 604 gctl_error(req, "%d start '%jd'", EINVAL, 605 (intmax_t)gpp->gpp_start); 606 return (EINVAL); 607 } 608 if (end < gpp->gpp_start || end > table->gpt_last) { 609 gctl_error(req, "%d size '%jd'", EINVAL, 610 (intmax_t)gpp->gpp_size); 611 return (EINVAL); 612 } 613 if (gpp->gpp_index > table->gpt_entries) { 614 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index); 615 return (EINVAL); 616 } 617 618 delent = last = NULL; 619 index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1; 620 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 621 if (entry->gpe_deleted) { 622 if (entry->gpe_index == index) 623 delent = entry; 624 continue; 625 } 626 if (entry->gpe_index == index) 627 index = entry->gpe_index + 1; 628 if (entry->gpe_index < index) 629 last = entry; 630 if (entry->gpe_internal) 631 continue; 632 if (gpp->gpp_start >= entry->gpe_start && 633 gpp->gpp_start <= entry->gpe_end) { 634 gctl_error(req, "%d start '%jd'", ENOSPC, 635 (intmax_t)gpp->gpp_start); 636 return (ENOSPC); 637 } 638 if (end >= entry->gpe_start && end <= entry->gpe_end) { 639 gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end); 640 return (ENOSPC); 641 } 642 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) { 643 gctl_error(req, "%d size '%jd'", ENOSPC, 644 (intmax_t)gpp->gpp_size); 645 return (ENOSPC); 646 } 647 } 648 if (gpp->gpp_index > 0 && index != gpp->gpp_index) { 649 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index); 650 return (EEXIST); 651 } 652 if (index > table->gpt_entries) { 653 gctl_error(req, "%d index '%d'", ENOSPC, index); 654 return (ENOSPC); 655 } 656 657 entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz, 658 M_WAITOK | M_ZERO) : delent; 659 entry->gpe_index = index; 660 entry->gpe_start = gpp->gpp_start; 661 entry->gpe_end = end; 662 error = G_PART_ADD(table, entry, gpp); 663 if (error) { 664 gctl_error(req, "%d", error); 665 if (delent == NULL) 666 g_free(entry); 667 return (error); 668 } 669 if (delent == NULL) { 670 if (last == NULL) 671 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry); 672 else 673 LIST_INSERT_AFTER(last, entry, gpe_entry); 674 entry->gpe_created = 1; 675 } else { 676 entry->gpe_deleted = 0; 677 entry->gpe_modified = 1; 678 } 679 g_part_new_provider(gp, table, entry); 680 681 /* Provide feedback if so requested. */ 682 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 683 sb = sbuf_new_auto(); 684 G_PART_FULLNAME(table, entry, sb, gp->name); 685 sbuf_cat(sb, " added\n"); 686 sbuf_finish(sb); 687 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 688 sbuf_delete(sb); 689 } 690 return (0); 691} 692 693static int 694g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp) 695{ 696 struct g_geom *gp; 697 struct g_part_table *table; 698 struct sbuf *sb; 699 int error, sz; 700 701 gp = gpp->gpp_geom; 702 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 703 g_topology_assert(); 704 705 table = gp->softc; 706 sz = table->gpt_scheme->gps_bootcodesz; 707 if (sz == 0) { 708 error = ENODEV; 709 goto fail; 710 } 711 if (gpp->gpp_codesize > sz) { 712 error = EFBIG; 713 goto fail; 714 } 715 716 error = G_PART_BOOTCODE(table, gpp); 717 if (error) 718 goto fail; 719 720 /* Provide feedback if so requested. */ 721 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 722 sb = sbuf_new_auto(); 723 sbuf_printf(sb, "bootcode written to %s\n", gp->name); 724 sbuf_finish(sb); 725 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 726 sbuf_delete(sb); 727 } 728 return (0); 729 730 fail: 731 gctl_error(req, "%d", error); 732 return (error); 733} 734 735static int 736g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp) 737{ 738 struct g_consumer *cp; 739 struct g_geom *gp; 740 struct g_provider *pp; 741 struct g_part_entry *entry, *tmp; 742 struct g_part_table *table; 743 char *buf; 744 int error, i; 745 746 gp = gpp->gpp_geom; 747 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 748 g_topology_assert(); 749 750 table = gp->softc; 751 if (!table->gpt_opened) { 752 gctl_error(req, "%d", EPERM); 753 return (EPERM); 754 } 755 756 g_topology_unlock(); 757 758 cp = LIST_FIRST(&gp->consumer); 759 if ((table->gpt_smhead | table->gpt_smtail) != 0) { 760 pp = cp->provider; 761 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO); 762 while (table->gpt_smhead != 0) { 763 i = ffs(table->gpt_smhead) - 1; 764 error = g_write_data(cp, i * pp->sectorsize, buf, 765 pp->sectorsize); 766 if (error) { 767 g_free(buf); 768 goto fail; 769 } 770 table->gpt_smhead &= ~(1 << i); 771 } 772 while (table->gpt_smtail != 0) { 773 i = ffs(table->gpt_smtail) - 1; 774 error = g_write_data(cp, pp->mediasize - (i + 1) * 775 pp->sectorsize, buf, pp->sectorsize); 776 if (error) { 777 g_free(buf); 778 goto fail; 779 } 780 table->gpt_smtail &= ~(1 << i); 781 } 782 g_free(buf); 783 } 784 785 if (table->gpt_scheme == &g_part_null_scheme) { 786 g_topology_lock(); 787 g_access(cp, -1, -1, -1); 788 g_part_wither(gp, ENXIO); 789 return (0); 790 } 791 792 error = G_PART_WRITE(table, cp); 793 if (error) 794 goto fail; 795 796 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 797 if (!entry->gpe_deleted) { 798 entry->gpe_created = 0; 799 entry->gpe_modified = 0; 800 continue; 801 } 802 LIST_REMOVE(entry, gpe_entry); 803 g_free(entry); 804 } 805 table->gpt_created = 0; 806 table->gpt_opened = 0; 807 808 g_topology_lock(); 809 g_access(cp, -1, -1, -1); 810 return (0); 811 812fail: 813 g_topology_lock(); 814 gctl_error(req, "%d", error); 815 return (error); 816} 817 818static int 819g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp) 820{ 821 struct g_consumer *cp; 822 struct g_geom *gp; 823 struct g_provider *pp; 824 struct g_part_scheme *scheme; 825 struct g_part_table *null, *table; 826 struct sbuf *sb; 827 int attr, error; 828 829 pp = gpp->gpp_provider; 830 scheme = gpp->gpp_scheme; 831 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 832 g_topology_assert(); 833 834 /* Check that there isn't already a g_part geom on the provider. */ 835 gp = g_part_find_geom(pp->name); 836 if (gp != NULL) { 837 null = gp->softc; 838 if (null->gpt_scheme != &g_part_null_scheme) { 839 gctl_error(req, "%d geom '%s'", EEXIST, pp->name); 840 return (EEXIST); 841 } 842 } else 843 null = NULL; 844 845 if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) && 846 (gpp->gpp_entries < scheme->gps_minent || 847 gpp->gpp_entries > scheme->gps_maxent)) { 848 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries); 849 return (EINVAL); 850 } 851 852 if (null == NULL) 853 gp = g_new_geomf(&g_part_class, "%s", pp->name); 854 gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM, 855 M_WAITOK); 856 table = gp->softc; 857 table->gpt_gp = gp; 858 table->gpt_scheme = gpp->gpp_scheme; 859 table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ? 860 gpp->gpp_entries : scheme->gps_minent; 861 LIST_INIT(&table->gpt_entry); 862 if (null == NULL) { 863 cp = g_new_consumer(gp); 864 error = g_attach(cp, pp); 865 if (error == 0) 866 error = g_access(cp, 1, 1, 1); 867 if (error != 0) { 868 g_part_wither(gp, error); 869 gctl_error(req, "%d geom '%s'", error, pp->name); 870 return (error); 871 } 872 table->gpt_opened = 1; 873 } else { 874 cp = LIST_FIRST(&gp->consumer); 875 table->gpt_opened = null->gpt_opened; 876 table->gpt_smhead = null->gpt_smhead; 877 table->gpt_smtail = null->gpt_smtail; 878 } 879 880 g_topology_unlock(); 881 882 /* Make sure the provider has media. */ 883 if (pp->mediasize == 0 || pp->sectorsize == 0) { 884 error = ENODEV; 885 goto fail; 886 } 887 888 /* Make sure we can nest and if so, determine our depth. */ 889 error = g_getattr("PART::isleaf", cp, &attr); 890 if (!error && attr) { 891 error = ENODEV; 892 goto fail; 893 } 894 error = g_getattr("PART::depth", cp, &attr); 895 table->gpt_depth = (!error) ? attr + 1 : 0; 896 897 /* 898 * Synthesize a disk geometry. Some partitioning schemes 899 * depend on it and since some file systems need it even 900 * when the partitition scheme doesn't, we do it here in 901 * scheme-independent code. 902 */ 903 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 904 905 error = G_PART_CREATE(table, gpp); 906 if (error) 907 goto fail; 908 909 g_topology_lock(); 910 911 table->gpt_created = 1; 912 if (null != NULL) 913 kobj_delete((kobj_t)null, M_GEOM); 914 915 /* 916 * Support automatic commit by filling in the gpp_geom 917 * parameter. 918 */ 919 gpp->gpp_parms |= G_PART_PARM_GEOM; 920 gpp->gpp_geom = gp; 921 922 /* Provide feedback if so requested. */ 923 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 924 sb = sbuf_new_auto(); 925 sbuf_printf(sb, "%s created\n", gp->name); 926 sbuf_finish(sb); 927 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 928 sbuf_delete(sb); 929 } 930 return (0); 931 932fail: 933 g_topology_lock(); 934 if (null == NULL) { 935 g_access(cp, -1, -1, -1); 936 g_part_wither(gp, error); 937 } else { 938 kobj_delete((kobj_t)gp->softc, M_GEOM); 939 gp->softc = null; 940 } 941 gctl_error(req, "%d provider", error); 942 return (error); 943} 944 945static int 946g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp) 947{ 948 struct g_geom *gp; 949 struct g_provider *pp; 950 struct g_part_entry *entry; 951 struct g_part_table *table; 952 struct sbuf *sb; 953 954 gp = gpp->gpp_geom; 955 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 956 g_topology_assert(); 957 958 table = gp->softc; 959 960 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 961 if (entry->gpe_deleted || entry->gpe_internal) 962 continue; 963 if (entry->gpe_index == gpp->gpp_index) 964 break; 965 } 966 if (entry == NULL) { 967 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 968 return (ENOENT); 969 } 970 971 pp = entry->gpe_pp; 972 if (pp != NULL) { 973 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) { 974 gctl_error(req, "%d", EBUSY); 975 return (EBUSY); 976 } 977 978 pp->private = NULL; 979 entry->gpe_pp = NULL; 980 } 981 982 if (pp != NULL) 983 g_wither_provider(pp, ENXIO); 984 985 /* Provide feedback if so requested. */ 986 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 987 sb = sbuf_new_auto(); 988 G_PART_FULLNAME(table, entry, sb, gp->name); 989 sbuf_cat(sb, " deleted\n"); 990 sbuf_finish(sb); 991 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 992 sbuf_delete(sb); 993 } 994 995 if (entry->gpe_created) { 996 LIST_REMOVE(entry, gpe_entry); 997 g_free(entry); 998 } else { 999 entry->gpe_modified = 0; 1000 entry->gpe_deleted = 1; 1001 } 1002 return (0); 1003} 1004 1005static int 1006g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp) 1007{ 1008 struct g_consumer *cp; 1009 struct g_geom *gp; 1010 struct g_provider *pp; 1011 struct g_part_entry *entry, *tmp; 1012 struct g_part_table *null, *table; 1013 struct sbuf *sb; 1014 int error; 1015 1016 gp = gpp->gpp_geom; 1017 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1018 g_topology_assert(); 1019 1020 table = gp->softc; 1021 /* Check for busy providers. */ 1022 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1023 if (entry->gpe_deleted || entry->gpe_internal) 1024 continue; 1025 if (gpp->gpp_force) { 1026 pp = entry->gpe_pp; 1027 if (pp == NULL) 1028 continue; 1029 if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0) 1030 continue; 1031 } 1032 gctl_error(req, "%d", EBUSY); 1033 return (EBUSY); 1034 } 1035 1036 if (gpp->gpp_force) { 1037 /* Destroy all providers. */ 1038 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1039 pp = entry->gpe_pp; 1040 if (pp != NULL) { 1041 pp->private = NULL; 1042 g_wither_provider(pp, ENXIO); 1043 } 1044 LIST_REMOVE(entry, gpe_entry); 1045 g_free(entry); 1046 } 1047 } 1048 1049 error = G_PART_DESTROY(table, gpp); 1050 if (error) { 1051 gctl_error(req, "%d", error); 1052 return (error); 1053 } 1054 1055 gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM, 1056 M_WAITOK); 1057 null = gp->softc; 1058 null->gpt_gp = gp; 1059 null->gpt_scheme = &g_part_null_scheme; 1060 LIST_INIT(&null->gpt_entry); 1061 1062 cp = LIST_FIRST(&gp->consumer); 1063 pp = cp->provider; 1064 null->gpt_last = pp->mediasize / pp->sectorsize - 1; 1065 1066 null->gpt_depth = table->gpt_depth; 1067 null->gpt_opened = table->gpt_opened; 1068 null->gpt_smhead = table->gpt_smhead; 1069 null->gpt_smtail = table->gpt_smtail; 1070 1071 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1072 LIST_REMOVE(entry, gpe_entry); 1073 g_free(entry); 1074 } 1075 kobj_delete((kobj_t)table, M_GEOM); 1076 1077 /* Provide feedback if so requested. */ 1078 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1079 sb = sbuf_new_auto(); 1080 sbuf_printf(sb, "%s destroyed\n", gp->name); 1081 sbuf_finish(sb); 1082 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1083 sbuf_delete(sb); 1084 } 1085 return (0); 1086} 1087 1088static int 1089g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp) 1090{ 1091 struct g_geom *gp; 1092 struct g_part_entry *entry; 1093 struct g_part_table *table; 1094 struct sbuf *sb; 1095 int error; 1096 1097 gp = gpp->gpp_geom; 1098 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1099 g_topology_assert(); 1100 1101 table = gp->softc; 1102 1103 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1104 if (entry->gpe_deleted || entry->gpe_internal) 1105 continue; 1106 if (entry->gpe_index == gpp->gpp_index) 1107 break; 1108 } 1109 if (entry == NULL) { 1110 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1111 return (ENOENT); 1112 } 1113 1114 error = G_PART_MODIFY(table, entry, gpp); 1115 if (error) { 1116 gctl_error(req, "%d", error); 1117 return (error); 1118 } 1119 1120 if (!entry->gpe_created) 1121 entry->gpe_modified = 1; 1122 1123 /* Provide feedback if so requested. */ 1124 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1125 sb = sbuf_new_auto(); 1126 G_PART_FULLNAME(table, entry, sb, gp->name); 1127 sbuf_cat(sb, " modified\n"); 1128 sbuf_finish(sb); 1129 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1130 sbuf_delete(sb); 1131 } 1132 return (0); 1133} 1134 1135static int 1136g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp) 1137{ 1138 gctl_error(req, "%d verb 'move'", ENOSYS); 1139 return (ENOSYS); 1140} 1141 1142static int 1143g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp) 1144{ 1145 struct g_part_table *table; 1146 struct g_geom *gp; 1147 struct sbuf *sb; 1148 int error, recovered; 1149 1150 gp = gpp->gpp_geom; 1151 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1152 g_topology_assert(); 1153 table = gp->softc; 1154 error = recovered = 0; 1155 1156 if (table->gpt_corrupt) { 1157 error = G_PART_RECOVER(table); 1158 if (error) { 1159 gctl_error(req, "%d recovering '%s' failed", 1160 error, gp->name); 1161 return (error); 1162 } 1163 recovered = 1; 1164 } 1165 /* Provide feedback if so requested. */ 1166 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1167 sb = sbuf_new_auto(); 1168 if (recovered) 1169 sbuf_printf(sb, "%s recovered\n", gp->name); 1170 else 1171 sbuf_printf(sb, "%s recovering is not needed\n", 1172 gp->name); 1173 sbuf_finish(sb); 1174 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1175 sbuf_delete(sb); 1176 } 1177 return (0); 1178} 1179 1180static int 1181g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp) 1182{ 1183 struct g_geom *gp; 1184 struct g_provider *pp; 1185 struct g_part_entry *pe, *entry; 1186 struct g_part_table *table; 1187 struct sbuf *sb; 1188 quad_t end; 1189 int error; 1190 1191 gp = gpp->gpp_geom; 1192 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1193 g_topology_assert(); 1194 table = gp->softc; 1195 1196 /* check gpp_index */ 1197 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1198 if (entry->gpe_deleted || entry->gpe_internal) 1199 continue; 1200 if (entry->gpe_index == gpp->gpp_index) 1201 break; 1202 } 1203 if (entry == NULL) { 1204 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1205 return (ENOENT); 1206 } 1207 1208 /* check gpp_size */ 1209 end = entry->gpe_start + gpp->gpp_size - 1; 1210 if (gpp->gpp_size < 1 || end > table->gpt_last) { 1211 gctl_error(req, "%d size '%jd'", EINVAL, 1212 (intmax_t)gpp->gpp_size); 1213 return (EINVAL); 1214 } 1215 1216 LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) { 1217 if (pe->gpe_deleted || pe->gpe_internal || pe == entry) 1218 continue; 1219 if (end >= pe->gpe_start && end <= pe->gpe_end) { 1220 gctl_error(req, "%d end '%jd'", ENOSPC, 1221 (intmax_t)end); 1222 return (ENOSPC); 1223 } 1224 if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) { 1225 gctl_error(req, "%d size '%jd'", ENOSPC, 1226 (intmax_t)gpp->gpp_size); 1227 return (ENOSPC); 1228 } 1229 } 1230 1231 pp = entry->gpe_pp; 1232 if ((g_debugflags & 16) == 0 && 1233 (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) { 1234 gctl_error(req, "%d", EBUSY); 1235 return (EBUSY); 1236 } 1237 1238 error = G_PART_RESIZE(table, entry, gpp); 1239 if (error) { 1240 gctl_error(req, "%d", error); 1241 return (error); 1242 } 1243 1244 if (!entry->gpe_created) 1245 entry->gpe_modified = 1; 1246 1247 /* update mediasize of changed provider */ 1248 pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) * 1249 pp->sectorsize; 1250 1251 /* Provide feedback if so requested. */ 1252 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1253 sb = sbuf_new_auto(); 1254 G_PART_FULLNAME(table, entry, sb, gp->name); 1255 sbuf_cat(sb, " resized\n"); 1256 sbuf_finish(sb); 1257 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1258 sbuf_delete(sb); 1259 } 1260 return (0); 1261} 1262 1263static int 1264g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp, 1265 unsigned int set) 1266{ 1267 struct g_geom *gp; 1268 struct g_part_entry *entry; 1269 struct g_part_table *table; 1270 struct sbuf *sb; 1271 int error; 1272 1273 gp = gpp->gpp_geom; 1274 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1275 g_topology_assert(); 1276 1277 table = gp->softc; 1278 1279 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1280 if (entry->gpe_deleted || entry->gpe_internal) 1281 continue; 1282 if (entry->gpe_index == gpp->gpp_index) 1283 break; 1284 } 1285 if (entry == NULL) { 1286 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1287 return (ENOENT); 1288 } 1289 1290 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set); 1291 if (error) { 1292 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib); 1293 return (error); 1294 } 1295 1296 /* Provide feedback if so requested. */ 1297 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1298 sb = sbuf_new_auto(); 1299 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib, 1300 (set) ? "" : "un"); 1301 G_PART_FULLNAME(table, entry, sb, gp->name); 1302 sbuf_printf(sb, "\n"); 1303 sbuf_finish(sb); 1304 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1305 sbuf_delete(sb); 1306 } 1307 return (0); 1308} 1309 1310static int 1311g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp) 1312{ 1313 struct g_consumer *cp; 1314 struct g_provider *pp; 1315 struct g_geom *gp; 1316 struct g_part_entry *entry, *tmp; 1317 struct g_part_table *table; 1318 int error, reprobe; 1319 1320 gp = gpp->gpp_geom; 1321 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1322 g_topology_assert(); 1323 1324 table = gp->softc; 1325 if (!table->gpt_opened) { 1326 gctl_error(req, "%d", EPERM); 1327 return (EPERM); 1328 } 1329 1330 cp = LIST_FIRST(&gp->consumer); 1331 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1332 entry->gpe_modified = 0; 1333 if (entry->gpe_created) { 1334 pp = entry->gpe_pp; 1335 if (pp != NULL) { 1336 pp->private = NULL; 1337 entry->gpe_pp = NULL; 1338 g_wither_provider(pp, ENXIO); 1339 } 1340 entry->gpe_deleted = 1; 1341 } 1342 if (entry->gpe_deleted) { 1343 LIST_REMOVE(entry, gpe_entry); 1344 g_free(entry); 1345 } 1346 } 1347 1348 g_topology_unlock(); 1349 1350 reprobe = (table->gpt_scheme == &g_part_null_scheme || 1351 table->gpt_created) ? 1 : 0; 1352 1353 if (reprobe) { 1354 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1355 if (entry->gpe_internal) 1356 continue; 1357 error = EBUSY; 1358 goto fail; 1359 } 1360 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1361 LIST_REMOVE(entry, gpe_entry); 1362 g_free(entry); 1363 } 1364 error = g_part_probe(gp, cp, table->gpt_depth); 1365 if (error) { 1366 g_topology_lock(); 1367 g_access(cp, -1, -1, -1); 1368 g_part_wither(gp, error); 1369 return (0); 1370 } 1371 table = gp->softc; 1372 1373 /* 1374 * Synthesize a disk geometry. Some partitioning schemes 1375 * depend on it and since some file systems need it even 1376 * when the partitition scheme doesn't, we do it here in 1377 * scheme-independent code. 1378 */ 1379 pp = cp->provider; 1380 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1381 } 1382 1383 error = G_PART_READ(table, cp); 1384 if (error) 1385 goto fail; 1386 error = g_part_check_integrity(table, cp); 1387 if (error) 1388 goto fail; 1389 1390 g_topology_lock(); 1391 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1392 if (!entry->gpe_internal) 1393 g_part_new_provider(gp, table, entry); 1394 } 1395 1396 table->gpt_opened = 0; 1397 g_access(cp, -1, -1, -1); 1398 return (0); 1399 1400fail: 1401 g_topology_lock(); 1402 gctl_error(req, "%d", error); 1403 return (error); 1404} 1405 1406static void 1407g_part_wither(struct g_geom *gp, int error) 1408{ 1409 struct g_part_entry *entry; 1410 struct g_part_table *table; 1411 1412 table = gp->softc; 1413 if (table != NULL) { 1414 G_PART_DESTROY(table, NULL); 1415 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1416 LIST_REMOVE(entry, gpe_entry); 1417 g_free(entry); 1418 } 1419 if (gp->softc != NULL) { 1420 kobj_delete((kobj_t)gp->softc, M_GEOM); 1421 gp->softc = NULL; 1422 } 1423 } 1424 g_wither_geom(gp, error); 1425} 1426 1427/* 1428 * Class methods. 1429 */ 1430 1431static void 1432g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb) 1433{ 1434 struct g_part_parms gpp; 1435 struct g_part_table *table; 1436 struct gctl_req_arg *ap; 1437 enum g_part_ctl ctlreq; 1438 unsigned int i, mparms, oparms, parm; 1439 int auto_commit, close_on_error; 1440 int error, modifies; 1441 1442 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb)); 1443 g_topology_assert(); 1444 1445 ctlreq = G_PART_CTL_NONE; 1446 modifies = 1; 1447 mparms = 0; 1448 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION; 1449 switch (*verb) { 1450 case 'a': 1451 if (!strcmp(verb, "add")) { 1452 ctlreq = G_PART_CTL_ADD; 1453 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE | 1454 G_PART_PARM_START | G_PART_PARM_TYPE; 1455 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL; 1456 } 1457 break; 1458 case 'b': 1459 if (!strcmp(verb, "bootcode")) { 1460 ctlreq = G_PART_CTL_BOOTCODE; 1461 mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE; 1462 } 1463 break; 1464 case 'c': 1465 if (!strcmp(verb, "commit")) { 1466 ctlreq = G_PART_CTL_COMMIT; 1467 mparms |= G_PART_PARM_GEOM; 1468 modifies = 0; 1469 } else if (!strcmp(verb, "create")) { 1470 ctlreq = G_PART_CTL_CREATE; 1471 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME; 1472 oparms |= G_PART_PARM_ENTRIES; 1473 } 1474 break; 1475 case 'd': 1476 if (!strcmp(verb, "delete")) { 1477 ctlreq = G_PART_CTL_DELETE; 1478 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1479 } else if (!strcmp(verb, "destroy")) { 1480 ctlreq = G_PART_CTL_DESTROY; 1481 mparms |= G_PART_PARM_GEOM; 1482 oparms |= G_PART_PARM_FORCE; 1483 } 1484 break; 1485 case 'm': 1486 if (!strcmp(verb, "modify")) { 1487 ctlreq = G_PART_CTL_MODIFY; 1488 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1489 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE; 1490 } else if (!strcmp(verb, "move")) { 1491 ctlreq = G_PART_CTL_MOVE; 1492 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1493 } 1494 break; 1495 case 'r': 1496 if (!strcmp(verb, "recover")) { 1497 ctlreq = G_PART_CTL_RECOVER; 1498 mparms |= G_PART_PARM_GEOM; 1499 } else if (!strcmp(verb, "resize")) { 1500 ctlreq = G_PART_CTL_RESIZE; 1501 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX | 1502 G_PART_PARM_SIZE; 1503 } 1504 break; 1505 case 's': 1506 if (!strcmp(verb, "set")) { 1507 ctlreq = G_PART_CTL_SET; 1508 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM | 1509 G_PART_PARM_INDEX; 1510 } 1511 break; 1512 case 'u': 1513 if (!strcmp(verb, "undo")) { 1514 ctlreq = G_PART_CTL_UNDO; 1515 mparms |= G_PART_PARM_GEOM; 1516 modifies = 0; 1517 } else if (!strcmp(verb, "unset")) { 1518 ctlreq = G_PART_CTL_UNSET; 1519 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM | 1520 G_PART_PARM_INDEX; 1521 } 1522 break; 1523 } 1524 if (ctlreq == G_PART_CTL_NONE) { 1525 gctl_error(req, "%d verb '%s'", EINVAL, verb); 1526 return; 1527 } 1528 1529 bzero(&gpp, sizeof(gpp)); 1530 for (i = 0; i < req->narg; i++) { 1531 ap = &req->arg[i]; 1532 parm = 0; 1533 switch (ap->name[0]) { 1534 case 'a': 1535 if (!strcmp(ap->name, "arg0")) { 1536 parm = mparms & 1537 (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER); 1538 } 1539 if (!strcmp(ap->name, "attrib")) 1540 parm = G_PART_PARM_ATTRIB; 1541 break; 1542 case 'b': 1543 if (!strcmp(ap->name, "bootcode")) 1544 parm = G_PART_PARM_BOOTCODE; 1545 break; 1546 case 'c': 1547 if (!strcmp(ap->name, "class")) 1548 continue; 1549 break; 1550 case 'e': 1551 if (!strcmp(ap->name, "entries")) 1552 parm = G_PART_PARM_ENTRIES; 1553 break; 1554 case 'f': 1555 if (!strcmp(ap->name, "flags")) 1556 parm = G_PART_PARM_FLAGS; 1557 else if (!strcmp(ap->name, "force")) 1558 parm = G_PART_PARM_FORCE; 1559 break; 1560 case 'i': 1561 if (!strcmp(ap->name, "index")) 1562 parm = G_PART_PARM_INDEX; 1563 break; 1564 case 'l': 1565 if (!strcmp(ap->name, "label")) 1566 parm = G_PART_PARM_LABEL; 1567 break; 1568 case 'o': 1569 if (!strcmp(ap->name, "output")) 1570 parm = G_PART_PARM_OUTPUT; 1571 break; 1572 case 's': 1573 if (!strcmp(ap->name, "scheme")) 1574 parm = G_PART_PARM_SCHEME; 1575 else if (!strcmp(ap->name, "size")) 1576 parm = G_PART_PARM_SIZE; 1577 else if (!strcmp(ap->name, "start")) 1578 parm = G_PART_PARM_START; 1579 break; 1580 case 't': 1581 if (!strcmp(ap->name, "type")) 1582 parm = G_PART_PARM_TYPE; 1583 break; 1584 case 'v': 1585 if (!strcmp(ap->name, "verb")) 1586 continue; 1587 else if (!strcmp(ap->name, "version")) 1588 parm = G_PART_PARM_VERSION; 1589 break; 1590 } 1591 if ((parm & (mparms | oparms)) == 0) { 1592 gctl_error(req, "%d param '%s'", EINVAL, ap->name); 1593 return; 1594 } 1595 switch (parm) { 1596 case G_PART_PARM_ATTRIB: 1597 error = g_part_parm_str(req, ap->name, 1598 &gpp.gpp_attrib); 1599 break; 1600 case G_PART_PARM_BOOTCODE: 1601 error = g_part_parm_bootcode(req, ap->name, 1602 &gpp.gpp_codeptr, &gpp.gpp_codesize); 1603 break; 1604 case G_PART_PARM_ENTRIES: 1605 error = g_part_parm_intmax(req, ap->name, 1606 &gpp.gpp_entries); 1607 break; 1608 case G_PART_PARM_FLAGS: 1609 error = g_part_parm_str(req, ap->name, &gpp.gpp_flags); 1610 break; 1611 case G_PART_PARM_FORCE: 1612 error = g_part_parm_uint32(req, ap->name, 1613 &gpp.gpp_force); 1614 break; 1615 case G_PART_PARM_GEOM: 1616 error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom); 1617 break; 1618 case G_PART_PARM_INDEX: 1619 error = g_part_parm_intmax(req, ap->name, 1620 &gpp.gpp_index); 1621 break; 1622 case G_PART_PARM_LABEL: 1623 error = g_part_parm_str(req, ap->name, &gpp.gpp_label); 1624 break; 1625 case G_PART_PARM_OUTPUT: 1626 error = 0; /* Write-only parameter */ 1627 break; 1628 case G_PART_PARM_PROVIDER: 1629 error = g_part_parm_provider(req, ap->name, 1630 &gpp.gpp_provider); 1631 break; 1632 case G_PART_PARM_SCHEME: 1633 error = g_part_parm_scheme(req, ap->name, 1634 &gpp.gpp_scheme); 1635 break; 1636 case G_PART_PARM_SIZE: 1637 error = g_part_parm_quad(req, ap->name, &gpp.gpp_size); 1638 break; 1639 case G_PART_PARM_START: 1640 error = g_part_parm_quad(req, ap->name, 1641 &gpp.gpp_start); 1642 break; 1643 case G_PART_PARM_TYPE: 1644 error = g_part_parm_str(req, ap->name, &gpp.gpp_type); 1645 break; 1646 case G_PART_PARM_VERSION: 1647 error = g_part_parm_uint32(req, ap->name, 1648 &gpp.gpp_version); 1649 break; 1650 default: 1651 error = EDOOFUS; 1652 gctl_error(req, "%d %s", error, ap->name); 1653 break; 1654 } 1655 if (error != 0) { 1656 if (error == ENOATTR) { 1657 gctl_error(req, "%d param '%s'", error, 1658 ap->name); 1659 } 1660 return; 1661 } 1662 gpp.gpp_parms |= parm; 1663 } 1664 if ((gpp.gpp_parms & mparms) != mparms) { 1665 parm = mparms - (gpp.gpp_parms & mparms); 1666 gctl_error(req, "%d param '%x'", ENOATTR, parm); 1667 return; 1668 } 1669 1670 /* Obtain permissions if possible/necessary. */ 1671 close_on_error = 0; 1672 table = NULL; 1673 if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) { 1674 table = gpp.gpp_geom->softc; 1675 if (table != NULL && table->gpt_corrupt && 1676 ctlreq != G_PART_CTL_DESTROY && 1677 ctlreq != G_PART_CTL_RECOVER) { 1678 gctl_error(req, "%d table '%s' is corrupt", 1679 EPERM, gpp.gpp_geom->name); 1680 return; 1681 } 1682 if (table != NULL && !table->gpt_opened) { 1683 error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer), 1684 1, 1, 1); 1685 if (error) { 1686 gctl_error(req, "%d geom '%s'", error, 1687 gpp.gpp_geom->name); 1688 return; 1689 } 1690 table->gpt_opened = 1; 1691 close_on_error = 1; 1692 } 1693 } 1694 1695 /* Allow the scheme to check or modify the parameters. */ 1696 if (table != NULL) { 1697 error = G_PART_PRECHECK(table, ctlreq, &gpp); 1698 if (error) { 1699 gctl_error(req, "%d pre-check failed", error); 1700 goto out; 1701 } 1702 } else 1703 error = EDOOFUS; /* Prevent bogus uninit. warning. */ 1704 1705 switch (ctlreq) { 1706 case G_PART_CTL_NONE: 1707 panic("%s", __func__); 1708 case G_PART_CTL_ADD: 1709 error = g_part_ctl_add(req, &gpp); 1710 break; 1711 case G_PART_CTL_BOOTCODE: 1712 error = g_part_ctl_bootcode(req, &gpp); 1713 break; 1714 case G_PART_CTL_COMMIT: 1715 error = g_part_ctl_commit(req, &gpp); 1716 break; 1717 case G_PART_CTL_CREATE: 1718 error = g_part_ctl_create(req, &gpp); 1719 break; 1720 case G_PART_CTL_DELETE: 1721 error = g_part_ctl_delete(req, &gpp); 1722 break; 1723 case G_PART_CTL_DESTROY: 1724 error = g_part_ctl_destroy(req, &gpp); 1725 break; 1726 case G_PART_CTL_MODIFY: 1727 error = g_part_ctl_modify(req, &gpp); 1728 break; 1729 case G_PART_CTL_MOVE: 1730 error = g_part_ctl_move(req, &gpp); 1731 break; 1732 case G_PART_CTL_RECOVER: 1733 error = g_part_ctl_recover(req, &gpp); 1734 break; 1735 case G_PART_CTL_RESIZE: 1736 error = g_part_ctl_resize(req, &gpp); 1737 break; 1738 case G_PART_CTL_SET: 1739 error = g_part_ctl_setunset(req, &gpp, 1); 1740 break; 1741 case G_PART_CTL_UNDO: 1742 error = g_part_ctl_undo(req, &gpp); 1743 break; 1744 case G_PART_CTL_UNSET: 1745 error = g_part_ctl_setunset(req, &gpp, 0); 1746 break; 1747 } 1748 1749 /* Implement automatic commit. */ 1750 if (!error) { 1751 auto_commit = (modifies && 1752 (gpp.gpp_parms & G_PART_PARM_FLAGS) && 1753 strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0; 1754 if (auto_commit) { 1755 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s", 1756 __func__)); 1757 error = g_part_ctl_commit(req, &gpp); 1758 } 1759 } 1760 1761 out: 1762 if (error && close_on_error) { 1763 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1); 1764 table->gpt_opened = 0; 1765 } 1766} 1767 1768static int 1769g_part_destroy_geom(struct gctl_req *req, struct g_class *mp, 1770 struct g_geom *gp) 1771{ 1772 1773 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name)); 1774 g_topology_assert(); 1775 1776 g_part_wither(gp, EINVAL); 1777 return (0); 1778} 1779 1780static struct g_geom * 1781g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 1782{ 1783 struct g_consumer *cp; 1784 struct g_geom *gp; 1785 struct g_part_entry *entry; 1786 struct g_part_table *table; 1787 struct root_hold_token *rht; 1788 int attr, depth; 1789 int error; 1790 1791 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name)); 1792 g_topology_assert(); 1793 1794 /* Skip providers that are already open for writing. */ 1795 if (pp->acw > 0) 1796 return (NULL); 1797 1798 /* 1799 * Create a GEOM with consumer and hook it up to the provider. 1800 * With that we become part of the topology. Optain read access 1801 * to the provider. 1802 */ 1803 gp = g_new_geomf(mp, "%s", pp->name); 1804 cp = g_new_consumer(gp); 1805 error = g_attach(cp, pp); 1806 if (error == 0) 1807 error = g_access(cp, 1, 0, 0); 1808 if (error != 0) { 1809 g_part_wither(gp, error); 1810 return (NULL); 1811 } 1812 1813 rht = root_mount_hold(mp->name); 1814 g_topology_unlock(); 1815 1816 /* 1817 * Short-circuit the whole probing galore when there's no 1818 * media present. 1819 */ 1820 if (pp->mediasize == 0 || pp->sectorsize == 0) { 1821 error = ENODEV; 1822 goto fail; 1823 } 1824 1825 /* Make sure we can nest and if so, determine our depth. */ 1826 error = g_getattr("PART::isleaf", cp, &attr); 1827 if (!error && attr) { 1828 error = ENODEV; 1829 goto fail; 1830 } 1831 error = g_getattr("PART::depth", cp, &attr); 1832 depth = (!error) ? attr + 1 : 0; 1833 1834 error = g_part_probe(gp, cp, depth); 1835 if (error) 1836 goto fail; 1837 1838 table = gp->softc; 1839 1840 /* 1841 * Synthesize a disk geometry. Some partitioning schemes 1842 * depend on it and since some file systems need it even 1843 * when the partitition scheme doesn't, we do it here in 1844 * scheme-independent code. 1845 */ 1846 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1847 1848 error = G_PART_READ(table, cp); 1849 if (error) 1850 goto fail; 1851 error = g_part_check_integrity(table, cp); 1852 if (error) 1853 goto fail; 1854 1855 g_topology_lock(); 1856 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1857 if (!entry->gpe_internal) 1858 g_part_new_provider(gp, table, entry); 1859 } 1860 1861 root_mount_rel(rht); 1862 g_access(cp, -1, 0, 0); 1863 return (gp); 1864 1865 fail: 1866 g_topology_lock(); 1867 root_mount_rel(rht); 1868 g_access(cp, -1, 0, 0); 1869 g_part_wither(gp, error); 1870 return (NULL); 1871} 1872 1873/* 1874 * Geom methods. 1875 */ 1876 1877static int 1878g_part_access(struct g_provider *pp, int dr, int dw, int de) 1879{ 1880 struct g_consumer *cp; 1881 1882 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr, 1883 dw, de)); 1884 1885 cp = LIST_FIRST(&pp->geom->consumer); 1886 1887 /* We always gain write-exclusive access. */ 1888 return (g_access(cp, dr, dw, dw + de)); 1889} 1890 1891static void 1892g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 1893 struct g_consumer *cp, struct g_provider *pp) 1894{ 1895 char buf[64]; 1896 struct g_part_entry *entry; 1897 struct g_part_table *table; 1898 1899 KASSERT(sb != NULL && gp != NULL, ("%s", __func__)); 1900 table = gp->softc; 1901 1902 if (indent == NULL) { 1903 KASSERT(cp == NULL && pp != NULL, ("%s", __func__)); 1904 entry = pp->private; 1905 if (entry == NULL) 1906 return; 1907 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index, 1908 (uintmax_t)entry->gpe_offset, 1909 G_PART_TYPE(table, entry, buf, sizeof(buf))); 1910 /* 1911 * libdisk compatibility quirk - the scheme dumps the 1912 * slicer name and partition type in a way that is 1913 * compatible with libdisk. When libdisk is not used 1914 * anymore, this should go away. 1915 */ 1916 G_PART_DUMPCONF(table, entry, sb, indent); 1917 } else if (cp != NULL) { /* Consumer configuration. */ 1918 KASSERT(pp == NULL, ("%s", __func__)); 1919 /* none */ 1920 } else if (pp != NULL) { /* Provider configuration. */ 1921 entry = pp->private; 1922 if (entry == NULL) 1923 return; 1924 sbuf_printf(sb, "%s<start>%ju</start>\n", indent, 1925 (uintmax_t)entry->gpe_start); 1926 sbuf_printf(sb, "%s<end>%ju</end>\n", indent, 1927 (uintmax_t)entry->gpe_end); 1928 sbuf_printf(sb, "%s<index>%u</index>\n", indent, 1929 entry->gpe_index); 1930 sbuf_printf(sb, "%s<type>%s</type>\n", indent, 1931 G_PART_TYPE(table, entry, buf, sizeof(buf))); 1932 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent, 1933 (uintmax_t)entry->gpe_offset); 1934 sbuf_printf(sb, "%s<length>%ju</length>\n", indent, 1935 (uintmax_t)pp->mediasize); 1936 G_PART_DUMPCONF(table, entry, sb, indent); 1937 } else { /* Geom configuration. */ 1938 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent, 1939 table->gpt_scheme->name); 1940 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent, 1941 table->gpt_entries); 1942 sbuf_printf(sb, "%s<first>%ju</first>\n", indent, 1943 (uintmax_t)table->gpt_first); 1944 sbuf_printf(sb, "%s<last>%ju</last>\n", indent, 1945 (uintmax_t)table->gpt_last); 1946 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent, 1947 table->gpt_sectors); 1948 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent, 1949 table->gpt_heads); 1950 sbuf_printf(sb, "%s<state>%s</state>\n", indent, 1951 table->gpt_corrupt ? "CORRUPT": "OK"); 1952 sbuf_printf(sb, "%s<modified>%s</modified>\n", indent, 1953 table->gpt_opened ? "true": "false"); 1954 G_PART_DUMPCONF(table, NULL, sb, indent); 1955 } 1956} 1957 1958static void 1959g_part_orphan(struct g_consumer *cp) 1960{ 1961 struct g_provider *pp; 1962 struct g_part_table *table; 1963 1964 pp = cp->provider; 1965 KASSERT(pp != NULL, ("%s", __func__)); 1966 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 1967 g_topology_assert(); 1968 1969 KASSERT(pp->error != 0, ("%s", __func__)); 1970 table = cp->geom->softc; 1971 if (table != NULL && table->gpt_opened) 1972 g_access(cp, -1, -1, -1); 1973 g_part_wither(cp->geom, pp->error); 1974} 1975 1976static void 1977g_part_spoiled(struct g_consumer *cp) 1978{ 1979 1980 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 1981 g_topology_assert(); 1982 1983 g_part_wither(cp->geom, ENXIO); 1984} 1985 1986static void 1987g_part_start(struct bio *bp) 1988{ 1989 struct bio *bp2; 1990 struct g_consumer *cp; 1991 struct g_geom *gp; 1992 struct g_part_entry *entry; 1993 struct g_part_table *table; 1994 struct g_kerneldump *gkd; 1995 struct g_provider *pp; 1996 1997 pp = bp->bio_to; 1998 gp = pp->geom; 1999 table = gp->softc; 2000 cp = LIST_FIRST(&gp->consumer); 2001 2002 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd, 2003 pp->name)); 2004 2005 entry = pp->private; 2006 if (entry == NULL) { 2007 g_io_deliver(bp, ENXIO); 2008 return; 2009 } 2010 2011 switch(bp->bio_cmd) { 2012 case BIO_DELETE: 2013 case BIO_READ: 2014 case BIO_WRITE: 2015 if (bp->bio_offset >= pp->mediasize) { 2016 g_io_deliver(bp, EIO); 2017 return; 2018 } 2019 bp2 = g_clone_bio(bp); 2020 if (bp2 == NULL) { 2021 g_io_deliver(bp, ENOMEM); 2022 return; 2023 } 2024 if (bp2->bio_offset + bp2->bio_length > pp->mediasize) 2025 bp2->bio_length = pp->mediasize - bp2->bio_offset; 2026 bp2->bio_done = g_std_done; 2027 bp2->bio_offset += entry->gpe_offset; 2028 g_io_request(bp2, cp); 2029 return; 2030 case BIO_FLUSH: 2031 break; 2032 case BIO_GETATTR: 2033 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads)) 2034 return; 2035 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors)) 2036 return; 2037 if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf)) 2038 return; 2039 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth)) 2040 return; 2041 if (g_handleattr_str(bp, "PART::scheme", 2042 table->gpt_scheme->name)) 2043 return; 2044 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) { 2045 /* 2046 * Check that the partition is suitable for kernel 2047 * dumps. Typically only swap partitions should be 2048 * used. 2049 */ 2050 if (!G_PART_DUMPTO(table, entry)) { 2051 g_io_deliver(bp, ENODEV); 2052 printf("GEOM_PART: Partition '%s' not suitable" 2053 " for kernel dumps (wrong type?)\n", 2054 pp->name); 2055 return; 2056 } 2057 gkd = (struct g_kerneldump *)bp->bio_data; 2058 if (gkd->offset >= pp->mediasize) { 2059 g_io_deliver(bp, EIO); 2060 return; 2061 } 2062 if (gkd->offset + gkd->length > pp->mediasize) 2063 gkd->length = pp->mediasize - gkd->offset; 2064 gkd->offset += entry->gpe_offset; 2065 } 2066 break; 2067 default: 2068 g_io_deliver(bp, EOPNOTSUPP); 2069 return; 2070 } 2071 2072 bp2 = g_clone_bio(bp); 2073 if (bp2 == NULL) { 2074 g_io_deliver(bp, ENOMEM); 2075 return; 2076 } 2077 bp2->bio_done = g_std_done; 2078 g_io_request(bp2, cp); 2079} 2080 2081static void 2082g_part_init(struct g_class *mp) 2083{ 2084 2085 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list); 2086} 2087 2088static void 2089g_part_fini(struct g_class *mp) 2090{ 2091 2092 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list); 2093} 2094 2095static void 2096g_part_unload_event(void *arg, int flag) 2097{ 2098 struct g_consumer *cp; 2099 struct g_geom *gp; 2100 struct g_provider *pp; 2101 struct g_part_scheme *scheme; 2102 struct g_part_table *table; 2103 uintptr_t *xchg; 2104 int acc, error; 2105 2106 if (flag == EV_CANCEL) 2107 return; 2108 2109 xchg = arg; 2110 error = 0; 2111 scheme = (void *)(*xchg); 2112 2113 g_topology_assert(); 2114 2115 LIST_FOREACH(gp, &g_part_class.geom, geom) { 2116 table = gp->softc; 2117 if (table->gpt_scheme != scheme) 2118 continue; 2119 2120 acc = 0; 2121 LIST_FOREACH(pp, &gp->provider, provider) 2122 acc += pp->acr + pp->acw + pp->ace; 2123 LIST_FOREACH(cp, &gp->consumer, consumer) 2124 acc += cp->acr + cp->acw + cp->ace; 2125 2126 if (!acc) 2127 g_part_wither(gp, ENOSYS); 2128 else 2129 error = EBUSY; 2130 } 2131 2132 if (!error) 2133 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list); 2134 2135 *xchg = error; 2136} 2137 2138int 2139g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme) 2140{ 2141 uintptr_t arg; 2142 int error; 2143 2144 switch (type) { 2145 case MOD_LOAD: 2146 TAILQ_INSERT_TAIL(&g_part_schemes, scheme, scheme_list); 2147 2148 error = g_retaste(&g_part_class); 2149 if (error) 2150 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list); 2151 break; 2152 case MOD_UNLOAD: 2153 arg = (uintptr_t)scheme; 2154 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK, 2155 NULL); 2156 if (!error) 2157 error = (arg == (uintptr_t)scheme) ? EDOOFUS : arg; 2158 break; 2159 default: 2160 error = EOPNOTSUPP; 2161 break; 2162 } 2163 2164 return (error); 2165} 2166