/* g_part.c revision 332521 */
1/*- 2 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/geom/part/g_part.c 332521 2018-04-16 00:29:07Z kevans $");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_ctl.h>
#include <geom/geom_int.h>
#include <geom/part/g_part.h>

#include "g_part_if.h"

#ifndef _PATH_DEV
#define _PATH_DEV "/dev/"
#endif

/*
 * The "null" scheme: an empty kobj method table.  A table using this
 * scheme represents a provider with no (or a destroyed) partition table.
 */
static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};

static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};

/* Registry of all partitioning schemes known to the class. */
TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);

/*
 * Mapping between user-visible alias strings ("freebsd-ufs", "efi", ...)
 * and the g_part_alias enumeration.  Kept sorted by lexeme.
 */
struct g_part_alias_list {
	const char *lexeme;
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "apple-boot", G_PART_ALIAS_APPLE_BOOT },
	{ "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE },
	{ "apple-hfs", G_PART_ALIAS_APPLE_HFS },
	{ "apple-label", G_PART_ALIAS_APPLE_LABEL },
	{ "apple-raid", G_PART_ALIAS_APPLE_RAID },
	{ "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
	{ "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
	{ "apple-ufs", G_PART_ALIAS_APPLE_UFS },
	{ "bios-boot", G_PART_ALIAS_BIOS_BOOT },
	{ "chromeos-firmware", G_PART_ALIAS_CHROMEOS_FIRMWARE },
	{ "chromeos-kernel", G_PART_ALIAS_CHROMEOS_KERNEL },
	{ "chromeos-reserved", G_PART_ALIAS_CHROMEOS_RESERVED },
	{ "chromeos-root", G_PART_ALIAS_CHROMEOS_ROOT },
	{ "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD },
	{ "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER },
	{ "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 },
	{ "dragonfly-label32", G_PART_ALIAS_DFBSD },
	{ "dragonfly-label64", G_PART_ALIAS_DFBSD64 },
	{ "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY },
	{ "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP },
	{ "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS },
	{ "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM },
	{ "ebr", G_PART_ALIAS_EBR },
	{ "efi", G_PART_ALIAS_EFI },
	{ "fat16", G_PART_ALIAS_MS_FAT16 },
	{ "fat32", G_PART_ALIAS_MS_FAT32 },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "linux-data", G_PART_ALIAS_LINUX_DATA },
	{ "linux-lvm", G_PART_ALIAS_LINUX_LVM },
	{ "linux-raid", G_PART_ALIAS_LINUX_RAID },
	{ "linux-swap", G_PART_ALIAS_LINUX_SWAP },
	{ "mbr", G_PART_ALIAS_MBR },
	{ "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
	{ "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
	{ "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
	{ "ms-recovery", G_PART_ALIAS_MS_RECOVERY },
	{ "ms-reserved", G_PART_ALIAS_MS_RESERVED },
	{ "ms-spaces", G_PART_ALIAS_MS_SPACES },
	{ "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
	{ "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
	{ "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
	{ "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
	{ "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
	{ "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
	{ "ntfs", G_PART_ALIAS_MS_NTFS },
	{ "openbsd-data", G_PART_ALIAS_OPENBSD_DATA },
	{ "prep-boot", G_PART_ALIAS_PREP_BOOT },
	{ "vmware-reserved", G_PART_ALIAS_VMRESERVED },
	{ "vmware-vmfs", G_PART_ALIAS_VMFS },
	{ "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
	{ "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR },
};

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0,
    "GEOM_PART stuff");
/*
 * kern.geom.part.check_integrity: when non-zero (the default), a table
 * that fails g_part_check_integrity() is rejected with EINVAL instead of
 * merely being flagged as corrupt.  Tunable at boot (CTLFLAG_RWTUN).
 */
static u_int check_integrity = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
    CTLFLAG_RWTUN, &check_integrity, 1,
    "Enable integrity checking");

/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;
static g_resize_t g_part_resize;
static g_ioctl_t g_part_ioctl;

static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.fini = g_part_fini,
	.init = g_part_init,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
	.resize = g_part_resize,
	.ioctl = g_part_ioctl,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);
MODULE_VERSION(g_part, 0);

/*
 * Support functions.
 */

static void g_part_wither(struct g_geom *, int);

/*
 * Translate a partition type alias to its canonical lexeme ("freebsd-ufs"
 * and the like).  Returns NULL if the alias is not in g_part_alias_list.
 */
const char *
g_part_alias_name(enum g_part_alias alias)
{
	int i;

	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
		if (g_part_alias_list[i].alias != alias)
			continue;
		return (g_part_alias_list[i].lexeme);
	}

	return (NULL);
}

/*
 * Given a disk size in blocks and a sectors-per-track count, pick the
 * head count (from a fixed candidate list) that yields the largest CHS
 * capacity with at most 1023 cylinders.  Outputs the winning capacity in
 * *bestchs and head count in *bestheads; both are 0 if no candidate fits.
 */
void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
	off_t chs, cylinders;
	u_int heads;
	int idx;

	*bestchs = 0;
	*bestheads = 0;
	for (idx = 0; candidate_heads[idx] != 0; idx++) {
		heads = candidate_heads[idx];
		cylinders = blocks / heads / sectors;
		if (cylinders < heads || cylinders < sectors)
			break;
		if (cylinders > 1023)
			continue;
		chs = cylinders * heads * sectors;
		/* Prefer larger capacity; break ties away from 1 head. */
		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
			*bestchs = chs;
			*bestheads = heads;
		}
	}
}

/*
 * Synthesize a CHS geometry for the table.  Firmware-reported geometry
 * (GEOM::fwsectors / GEOM::fwheads) is used verbatim when available;
 * otherwise search candidate sector counts for the best fit, falling
 * back to the 255/63 maximum for disks too big for any CHS geometry.
 */
static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}

/*
 * Completion handler for a GEOM::physpath request forwarded downward:
 * append "/<partition name>" to the path the lower layer returned,
 * provided the buffer has room, then finish the parent bio.
 */
static void
g_part_get_physpath_done(struct bio *bp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_provider *pp;
	struct bio *pbp;

	pbp = bp->bio_parent;
	pp = pbp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	entry = pp->private;

	if (bp->bio_error == 0) {
		char *end;
		size_t len, remainder;
		len = strlcat(bp->bio_data, "/", bp->bio_length);
		if (len < bp->bio_length) {
			end = bp->bio_data + len;
			remainder = bp->bio_length - len;
			G_PART_NAME(table, entry, end, remainder);
		}
	}
	g_std_done(bp);
}


/*
 * Verbose-boot diagnostic printf, scoped to g_part_check_integrity()
 * via #undef below.  NB: not do/while-wrapped; every call site is a
 * full statement so the bare if-block form is safe here.
 */
#define	DPRINTF(...)	if (bootverbose) {	\
	printf("GEOM_PART: " __VA_ARGS__);	\
}

/*
 * Sanity-check a freshly probed/modified table: LBA ranges within the
 * media, per-entry start/end ordering, and pairwise overlap between
 * entries.  Returns EINVAL when checking is enforced and any test
 * failed; otherwise marks the table corrupt and returns 0.
 */
static int
g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
{
	struct g_part_entry *e1, *e2;
	struct g_provider *pp;
	off_t offset;
	int failed;

	failed = 0;
	pp = cp->provider;
	if (table->gpt_last < table->gpt_first) {
		DPRINTF("last LBA is below first LBA: %jd < %jd\n",
		    (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
		failed++;
	}
	if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
		DPRINTF("last LBA extends beyond mediasize: "
		    "%jd > %jd\n", (intmax_t)table->gpt_last,
		    (intmax_t)pp->mediasize / pp->sectorsize - 1);
		failed++;
	}
	LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
		if (e1->gpe_deleted || e1->gpe_internal)
			continue;
		if (e1->gpe_start < table->gpt_first) {
			DPRINTF("partition %d has start offset below first "
			    "LBA: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_first);
			failed++;
		}
		if (e1->gpe_start > table->gpt_last) {
			DPRINTF("partition %d has start offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (e1->gpe_end < e1->gpe_start) {
			DPRINTF("partition %d has end offset below start "
			    "offset: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)e1->gpe_start);
			failed++;
		}
		if (e1->gpe_end > table->gpt_last) {
			DPRINTF("partition %d has end offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (pp->stripesize > 0) {
			offset = e1->gpe_start * pp->sectorsize;
			if (e1->gpe_offset > offset)
				offset = e1->gpe_offset;
			if ((offset + pp->stripeoffset) % pp->stripesize) {
				DPRINTF("partition %d on (%s, %s) is not "
				    "aligned on %u bytes\n", e1->gpe_index,
				    pp->name, table->gpt_scheme->name,
				    pp->stripesize);
				/* Don't treat this as a critical failure */
			}
		}
		/* Pairwise overlap check against every later entry. */
		e2 = e1;
		while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
			if (e2->gpe_deleted || e2->gpe_internal)
				continue;
			if (e1->gpe_start >= e2->gpe_start &&
			    e1->gpe_start <= e2->gpe_end) {
				DPRINTF("partition %d has start offset inside "
				    "partition %d: start[%d] %jd >= start[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_end >= e2->gpe_start &&
			    e1->gpe_end <= e2->gpe_end) {
				DPRINTF("partition %d has end offset inside "
				    "partition %d: start[%d] %jd >= end[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_end,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_start < e2->gpe_start &&
			    e1->gpe_end > e2->gpe_end) {
				DPRINTF("partition %d contains partition %d: "
				    "start[%d] %jd > start[%d] %jd, end[%d] "
				    "%jd < end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end,
				    e1->gpe_index, (intmax_t)e1->gpe_end);
				failed++;
			}
		}
	}
	if (failed != 0) {
		printf("GEOM_PART: integrity check failed (%s, %s)\n",
		    pp->name, table->gpt_scheme->name);
		if (check_integrity != 0)
			return (EINVAL);
		table->gpt_corrupt = 1;
	}
	return (0);
}
#undef	DPRINTF

/*
 * Return the entry with the given index, allocating and inserting a new
 * one (keeping the gpt_entry list sorted by index) if none exists.  A
 * reused entry gets its gpe_offset reset; start/end are always updated.
 */
struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
	struct g_part_entry *entry, *last;

	last = NULL;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
			break;
		if (entry->gpe_index > index) {
			entry = NULL;
			break;
		}
		last = entry;
	}
	if (entry == NULL) {
		entry = g_malloc(table->gpt_scheme->gps_entrysz,
		    M_WAITOK | M_ZERO);
		entry->gpe_index = index;
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
	} else
		entry->gpe_offset = 0;
	entry->gpe_start = start;
	entry->gpe_end = end;
	return (entry);
}

/*
 * Create (or refresh) the GEOM provider that exposes a table entry as a
 * device node, propagating size, sector/stripe parameters and unmapped-
 * I/O capability from the underlying provider.
 */
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct sbuf *sb;
	off_t offset;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	/* Byte offset of the entry; may be pushed up for alignment. */
	offset = entry->gpe_start * pp->sectorsize;
	if (entry->gpe_offset < offset)
		entry->gpe_offset = offset;

	if (entry->gpe_pp == NULL) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_finish(sb);
		entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
		sbuf_delete(sb);
		entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->stripesize = pp->stripesize;
	entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
	if (pp->stripesize > 0)
		entry->gpe_pp->stripeoffset %= pp->stripesize;
	entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
	g_error_provider(entry->gpe_pp, 0);
}

/* Look up a live (non-withering) PART geom by name; NULL if none. */
static struct g_geom*
g_part_find_geom(const char *name)
{
	struct g_geom *gp;
	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		if ((gp->flags & G_GEOM_WITHER) == 0 &&
		    strcmp(name, gp->name) == 0)
			break;
	}
	return (gp);
}

/*
 * Fetch a gctl parameter naming a PART geom (with optional /dev/ prefix)
 * and resolve it.  Returns ENOATTR if absent, EINVAL (reported via
 * gctl_error) if no such geom.
 */
static int
g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
{
	struct g_geom *gp;
	const char *gname;

	gname = gctl_get_asciiparam(req, name);
	if (gname == NULL)
		return (ENOATTR);
	if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		gname += sizeof(_PATH_DEV) - 1;
	gp = g_part_find_geom(gname);
	if (gp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
		return (EINVAL);
	}
	*v = gp;
	return (0);
}

/*
 * Fetch a gctl parameter naming a provider (with optional /dev/ prefix)
 * and resolve it; same error conventions as g_part_parm_geom().
 */
static int
g_part_parm_provider(struct gctl_req *req, const char *name,
    struct g_provider **v)
{
	struct g_provider *pp;
	const char *pname;

	pname = gctl_get_asciiparam(req, name);
	if (pname == NULL)
		return (ENOATTR);
	if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		pname += sizeof(_PATH_DEV) - 1;
	pp = g_provider_by_name(pname);
	if (pp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
		return (EINVAL);
	}
	*v = pp;
	return (0);
}

/* Parse a non-negative quad_t parameter; EINVAL on junk or negatives. */
static int
g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
{
	const char *p;
	char *x;
	quad_t q;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	q = strtoq(p, &x, 0);
	if (*x != '\0' || q < 0) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = q;
	return (0);
}

/*
 * Resolve a scheme-name parameter against the registered schemes
 * (case-insensitive); the null scheme is never matchable by name.
 */
static int
g_part_parm_scheme(struct gctl_req *req, const char *name,
    struct g_part_scheme **v)
{
	struct g_part_scheme *s;
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
		if (s == &g_part_null_scheme)
			continue;
		if (!strcasecmp(s->name, p))
			break;
	}
	if (s == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = s;
	return (0);
}

/*
 * Fetch a string parameter.  Empty strings are rejected except for
 * "label", where an empty label is a valid value.
 */
static int
g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
{
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	/* An empty label is always valid. */
	if (strcmp(name, "label") != 0 && p[0] == '\0') {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = p;
	return (0);
}

/* Fetch an intmax_t binary parameter constrained to [0, INT_MAX]. */
static int
g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
{
	const intmax_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
		gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

/* Fetch a uint32_t binary parameter constrained to [0, INT_MAX]. */
static int
g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
{
	const uint32_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p > INT_MAX) {
		gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

/* Fetch a raw bootcode blob parameter; returns pointer and size. */
static int
g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
    unsigned int *s)
{
	const void *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	*v = p;
	*s = size;
	return (0);
}

/*
 * Probe all registered schemes against the consumer and keep the one
 * with the best (highest, <= 0) probe priority in gp->softc.  A current
 * table probing at 0 is kept as-is.  Returns ENXIO if nothing matched.
 */
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = NULL;
		pri = INT_MIN;
	}

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
			continue;
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			pri = probe;
			scheme = iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == NULL) ? ENXIO : 0);
}

/*
 * Control request functions.
 */

/*
 * "add" verb: create a new partition entry.  Validates the requested
 * LBA range against the table bounds and existing entries, allocates
 * (or resurrects a deleted) entry, invokes the scheme's add method and
 * instantiates the provider.
 */
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	/*
	 * Walk the entry list: remember a deleted entry with the wanted
	 * index (for reuse), find the first free index if none was
	 * requested, and reject ranges overlapping live entries.
	 */
	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
		return (ENOSPC);
	}

	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		/* Insert keeping the list sorted by index. */
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
			sbuf_printf(sb, " added, but partition is not "
			    "aligned on %u bytes\n", pp->stripesize);
		else
			sbuf_cat(sb, " added\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

/*
 * "bootcode" verb: hand the supplied boot code to the scheme, after
 * checking the scheme supports boot code (ENODEV) and that the blob
 * fits (EFBIG).
 */
static int
g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_table *table;
	struct sbuf *sb;
	int error, sz;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	sz = table->gpt_scheme->gps_bootcodesz;
	if (sz == 0) {
		error = ENODEV;
		goto fail;
	}
	if (gpp->gpp_codesize > sz) {
		error = EFBIG;
		goto fail;
	}

	error = G_PART_BOOTCODE(table, gpp);
	if (error)
		goto fail;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "bootcode written to %s\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

 fail:
	gctl_error(req, "%d", error);
	return (error);
}

/*
 * "commit" verb: write pending changes to disk.  Scrubs the scrub-map
 * sectors at head and tail of the media, writes the table via the
 * scheme, then clears the created/modified/deleted bookkeeping and
 * drops the exclusive access acquired when the table was opened.
 */
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	/* I/O below must run without the topology lock. */
	g_topology_unlock();

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		/* Zero out sectors flagged in the head/tail scrub maps. */
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	/* Committing a destroyed table: wither the whole geom. */
	if (table->gpt_scheme == &g_part_null_scheme) {
		g_topology_lock();
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			/* Notify consumers that provider might be changed.
			 */
			if (entry->gpe_modified && (
			    entry->gpe_pp->acw + entry->gpe_pp->ace +
			    entry->gpe_pp->acr) == 0)
				g_media_changed(entry->gpe_pp, M_NOWAIT);
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;

	g_topology_lock();
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

/*
 * "create" verb: put a new partition table of the requested scheme on a
 * provider.  Replaces an existing null-scheme table in place; otherwise
 * creates a fresh geom/consumer and opens the provider exclusively.
 * On success fills in gpp_geom so an automatic commit can follow.
 */
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	gp = g_part_find_geom(pp->name);
	if (gp != NULL) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	    gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		cp = g_new_consumer(gp);
		cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		/* Inherit open state and scrub maps from the null table. */
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		/* We created the geom; tear it down completely. */
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		/* Restore the pre-existing null table. */
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}

/*
 * "delete" verb: remove one partition entry by index.  Fails with EBUSY
 * while the entry's provider is open; otherwise withers the provider
 * and either frees a never-committed entry or marks it deleted for the
 * next commit.
 */
static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	pp = entry->gpe_pp;
	if (pp != NULL) {
		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}

		pp->private = NULL;
		entry->gpe_pp = NULL;
	}

	if (pp != NULL)
		g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested.
	 */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " deleted\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}

	if (entry->gpe_created) {
		/* Never committed: drop it outright. */
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		/* On disk: mark deleted; removal happens at commit. */
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	return (0);
}

/*
 * "destroy" verb: remove the partition table from a provider.  Refuses
 * while any partition provider is open (unless forced, in which case
 * open-count-zero providers are withered first), then replaces the
 * table with a null-scheme table that inherits open state and scrub
 * maps so a subsequent commit can finalize the destruction.
 */
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Check for busy providers. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (gpp->gpp_force) {
			pp = entry->gpe_pp;
			if (pp == NULL)
				continue;
			if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
				continue;
		}
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	if (gpp->gpp_force) {
		/* Destroy all providers. */
		LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, ENXIO);
			}
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* Swap in a null-scheme table carrying over relevant state. */
	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;
	null->gpt_last = pp->mediasize / pp->sectorsize - 1;

	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

/*
 * "modify" verb: change attributes (e.g. type/label) of an existing
 * entry via the scheme's modify method; marks the entry modified so
 * the change is written out on the next commit.
 */
static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested.
*/ 1248 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1249 sb = sbuf_new_auto(); 1250 G_PART_FULLNAME(table, entry, sb, gp->name); 1251 sbuf_cat(sb, " modified\n"); 1252 sbuf_finish(sb); 1253 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1254 sbuf_delete(sb); 1255 } 1256 return (0); 1257} 1258 1259static int 1260g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp) 1261{ 1262 gctl_error(req, "%d verb 'move'", ENOSYS); 1263 return (ENOSYS); 1264} 1265 1266static int 1267g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp) 1268{ 1269 struct g_part_table *table; 1270 struct g_geom *gp; 1271 struct sbuf *sb; 1272 int error, recovered; 1273 1274 gp = gpp->gpp_geom; 1275 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1276 g_topology_assert(); 1277 table = gp->softc; 1278 error = recovered = 0; 1279 1280 if (table->gpt_corrupt) { 1281 error = G_PART_RECOVER(table); 1282 if (error == 0) 1283 error = g_part_check_integrity(table, 1284 LIST_FIRST(&gp->consumer)); 1285 if (error) { 1286 gctl_error(req, "%d recovering '%s' failed", 1287 error, gp->name); 1288 return (error); 1289 } 1290 recovered = 1; 1291 } 1292 /* Provide feedback if so requested. 
*/ 1293 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1294 sb = sbuf_new_auto(); 1295 if (recovered) 1296 sbuf_printf(sb, "%s recovered\n", gp->name); 1297 else 1298 sbuf_printf(sb, "%s recovering is not needed\n", 1299 gp->name); 1300 sbuf_finish(sb); 1301 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1302 sbuf_delete(sb); 1303 } 1304 return (0); 1305} 1306 1307static int 1308g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp) 1309{ 1310 struct g_geom *gp; 1311 struct g_provider *pp; 1312 struct g_part_entry *pe, *entry; 1313 struct g_part_table *table; 1314 struct sbuf *sb; 1315 quad_t end; 1316 int error; 1317 off_t mediasize; 1318 1319 gp = gpp->gpp_geom; 1320 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1321 g_topology_assert(); 1322 table = gp->softc; 1323 1324 /* check gpp_index */ 1325 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1326 if (entry->gpe_deleted || entry->gpe_internal) 1327 continue; 1328 if (entry->gpe_index == gpp->gpp_index) 1329 break; 1330 } 1331 if (entry == NULL) { 1332 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index); 1333 return (ENOENT); 1334 } 1335 1336 /* check gpp_size */ 1337 end = entry->gpe_start + gpp->gpp_size - 1; 1338 if (gpp->gpp_size < 1 || end > table->gpt_last) { 1339 gctl_error(req, "%d size '%jd'", EINVAL, 1340 (intmax_t)gpp->gpp_size); 1341 return (EINVAL); 1342 } 1343 1344 LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) { 1345 if (pe->gpe_deleted || pe->gpe_internal || pe == entry) 1346 continue; 1347 if (end >= pe->gpe_start && end <= pe->gpe_end) { 1348 gctl_error(req, "%d end '%jd'", ENOSPC, 1349 (intmax_t)end); 1350 return (ENOSPC); 1351 } 1352 if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) { 1353 gctl_error(req, "%d size '%jd'", ENOSPC, 1354 (intmax_t)gpp->gpp_size); 1355 return (ENOSPC); 1356 } 1357 } 1358 1359 pp = entry->gpe_pp; 1360 if ((g_debugflags & 16) == 0 && 1361 (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) { 1362 if (entry->gpe_end - 
entry->gpe_start + 1 > gpp->gpp_size) { 1363 /* Deny shrinking of an opened partition. */ 1364 gctl_error(req, "%d", EBUSY); 1365 return (EBUSY); 1366 } 1367 } 1368 1369 error = G_PART_RESIZE(table, entry, gpp); 1370 if (error) { 1371 gctl_error(req, "%d%s", error, error != EBUSY ? "": 1372 " resizing will lead to unexpected shrinking" 1373 " due to alignment"); 1374 return (error); 1375 } 1376 1377 if (!entry->gpe_created) 1378 entry->gpe_modified = 1; 1379 1380 /* update mediasize of changed provider */ 1381 mediasize = (entry->gpe_end - entry->gpe_start + 1) * 1382 pp->sectorsize; 1383 g_resize_provider(pp, mediasize); 1384 1385 /* Provide feedback if so requested. */ 1386 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1387 sb = sbuf_new_auto(); 1388 G_PART_FULLNAME(table, entry, sb, gp->name); 1389 sbuf_cat(sb, " resized\n"); 1390 sbuf_finish(sb); 1391 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1392 sbuf_delete(sb); 1393 } 1394 return (0); 1395} 1396 1397static int 1398g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp, 1399 unsigned int set) 1400{ 1401 struct g_geom *gp; 1402 struct g_part_entry *entry; 1403 struct g_part_table *table; 1404 struct sbuf *sb; 1405 int error; 1406 1407 gp = gpp->gpp_geom; 1408 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1409 g_topology_assert(); 1410 1411 table = gp->softc; 1412 1413 if (gpp->gpp_parms & G_PART_PARM_INDEX) { 1414 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1415 if (entry->gpe_deleted || entry->gpe_internal) 1416 continue; 1417 if (entry->gpe_index == gpp->gpp_index) 1418 break; 1419 } 1420 if (entry == NULL) { 1421 gctl_error(req, "%d index '%d'", ENOENT, 1422 gpp->gpp_index); 1423 return (ENOENT); 1424 } 1425 } else 1426 entry = NULL; 1427 1428 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set); 1429 if (error) { 1430 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib); 1431 return (error); 1432 } 1433 1434 /* Provide feedback if so 
requested. */ 1435 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) { 1436 sb = sbuf_new_auto(); 1437 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib, 1438 (set) ? "" : "un"); 1439 if (entry) 1440 G_PART_FULLNAME(table, entry, sb, gp->name); 1441 else 1442 sbuf_cat(sb, gp->name); 1443 sbuf_cat(sb, "\n"); 1444 sbuf_finish(sb); 1445 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); 1446 sbuf_delete(sb); 1447 } 1448 return (0); 1449} 1450 1451static int 1452g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp) 1453{ 1454 struct g_consumer *cp; 1455 struct g_provider *pp; 1456 struct g_geom *gp; 1457 struct g_part_entry *entry, *tmp; 1458 struct g_part_table *table; 1459 int error, reprobe; 1460 1461 gp = gpp->gpp_geom; 1462 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name)); 1463 g_topology_assert(); 1464 1465 table = gp->softc; 1466 if (!table->gpt_opened) { 1467 gctl_error(req, "%d", EPERM); 1468 return (EPERM); 1469 } 1470 1471 cp = LIST_FIRST(&gp->consumer); 1472 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) { 1473 entry->gpe_modified = 0; 1474 if (entry->gpe_created) { 1475 pp = entry->gpe_pp; 1476 if (pp != NULL) { 1477 pp->private = NULL; 1478 entry->gpe_pp = NULL; 1479 g_wither_provider(pp, ENXIO); 1480 } 1481 entry->gpe_deleted = 1; 1482 } 1483 if (entry->gpe_deleted) { 1484 LIST_REMOVE(entry, gpe_entry); 1485 g_free(entry); 1486 } 1487 } 1488 1489 g_topology_unlock(); 1490 1491 reprobe = (table->gpt_scheme == &g_part_null_scheme || 1492 table->gpt_created) ? 
1 : 0; 1493 1494 if (reprobe) { 1495 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1496 if (entry->gpe_internal) 1497 continue; 1498 error = EBUSY; 1499 goto fail; 1500 } 1501 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1502 LIST_REMOVE(entry, gpe_entry); 1503 g_free(entry); 1504 } 1505 error = g_part_probe(gp, cp, table->gpt_depth); 1506 if (error) { 1507 g_topology_lock(); 1508 g_access(cp, -1, -1, -1); 1509 g_part_wither(gp, error); 1510 return (0); 1511 } 1512 table = gp->softc; 1513 1514 /* 1515 * Synthesize a disk geometry. Some partitioning schemes 1516 * depend on it and since some file systems need it even 1517 * when the partitition scheme doesn't, we do it here in 1518 * scheme-independent code. 1519 */ 1520 pp = cp->provider; 1521 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1522 } 1523 1524 error = G_PART_READ(table, cp); 1525 if (error) 1526 goto fail; 1527 error = g_part_check_integrity(table, cp); 1528 if (error) 1529 goto fail; 1530 1531 g_topology_lock(); 1532 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 1533 if (!entry->gpe_internal) 1534 g_part_new_provider(gp, table, entry); 1535 } 1536 1537 table->gpt_opened = 0; 1538 g_access(cp, -1, -1, -1); 1539 return (0); 1540 1541fail: 1542 g_topology_lock(); 1543 gctl_error(req, "%d", error); 1544 return (error); 1545} 1546 1547static void 1548g_part_wither(struct g_geom *gp, int error) 1549{ 1550 struct g_part_entry *entry; 1551 struct g_part_table *table; 1552 1553 table = gp->softc; 1554 if (table != NULL) { 1555 G_PART_DESTROY(table, NULL); 1556 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) { 1557 LIST_REMOVE(entry, gpe_entry); 1558 g_free(entry); 1559 } 1560 if (gp->softc != NULL) { 1561 kobj_delete((kobj_t)gp->softc, M_GEOM); 1562 gp->softc = NULL; 1563 } 1564 } 1565 g_wither_geom(gp, error); 1566} 1567 1568/* 1569 * Class methods. 
1570 */ 1571 1572static void 1573g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb) 1574{ 1575 struct g_part_parms gpp; 1576 struct g_part_table *table; 1577 struct gctl_req_arg *ap; 1578 enum g_part_ctl ctlreq; 1579 unsigned int i, mparms, oparms, parm; 1580 int auto_commit, close_on_error; 1581 int error, modifies; 1582 1583 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb)); 1584 g_topology_assert(); 1585 1586 ctlreq = G_PART_CTL_NONE; 1587 modifies = 1; 1588 mparms = 0; 1589 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION; 1590 switch (*verb) { 1591 case 'a': 1592 if (!strcmp(verb, "add")) { 1593 ctlreq = G_PART_CTL_ADD; 1594 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE | 1595 G_PART_PARM_START | G_PART_PARM_TYPE; 1596 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL; 1597 } 1598 break; 1599 case 'b': 1600 if (!strcmp(verb, "bootcode")) { 1601 ctlreq = G_PART_CTL_BOOTCODE; 1602 mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE; 1603 } 1604 break; 1605 case 'c': 1606 if (!strcmp(verb, "commit")) { 1607 ctlreq = G_PART_CTL_COMMIT; 1608 mparms |= G_PART_PARM_GEOM; 1609 modifies = 0; 1610 } else if (!strcmp(verb, "create")) { 1611 ctlreq = G_PART_CTL_CREATE; 1612 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME; 1613 oparms |= G_PART_PARM_ENTRIES; 1614 } 1615 break; 1616 case 'd': 1617 if (!strcmp(verb, "delete")) { 1618 ctlreq = G_PART_CTL_DELETE; 1619 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1620 } else if (!strcmp(verb, "destroy")) { 1621 ctlreq = G_PART_CTL_DESTROY; 1622 mparms |= G_PART_PARM_GEOM; 1623 oparms |= G_PART_PARM_FORCE; 1624 } 1625 break; 1626 case 'm': 1627 if (!strcmp(verb, "modify")) { 1628 ctlreq = G_PART_CTL_MODIFY; 1629 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1630 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE; 1631 } else if (!strcmp(verb, "move")) { 1632 ctlreq = G_PART_CTL_MOVE; 1633 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX; 1634 } 1635 break; 1636 case 
'r': 1637 if (!strcmp(verb, "recover")) { 1638 ctlreq = G_PART_CTL_RECOVER; 1639 mparms |= G_PART_PARM_GEOM; 1640 } else if (!strcmp(verb, "resize")) { 1641 ctlreq = G_PART_CTL_RESIZE; 1642 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX | 1643 G_PART_PARM_SIZE; 1644 } 1645 break; 1646 case 's': 1647 if (!strcmp(verb, "set")) { 1648 ctlreq = G_PART_CTL_SET; 1649 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM; 1650 oparms |= G_PART_PARM_INDEX; 1651 } 1652 break; 1653 case 'u': 1654 if (!strcmp(verb, "undo")) { 1655 ctlreq = G_PART_CTL_UNDO; 1656 mparms |= G_PART_PARM_GEOM; 1657 modifies = 0; 1658 } else if (!strcmp(verb, "unset")) { 1659 ctlreq = G_PART_CTL_UNSET; 1660 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM; 1661 oparms |= G_PART_PARM_INDEX; 1662 } 1663 break; 1664 } 1665 if (ctlreq == G_PART_CTL_NONE) { 1666 gctl_error(req, "%d verb '%s'", EINVAL, verb); 1667 return; 1668 } 1669 1670 bzero(&gpp, sizeof(gpp)); 1671 for (i = 0; i < req->narg; i++) { 1672 ap = &req->arg[i]; 1673 parm = 0; 1674 switch (ap->name[0]) { 1675 case 'a': 1676 if (!strcmp(ap->name, "arg0")) { 1677 parm = mparms & 1678 (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER); 1679 } 1680 if (!strcmp(ap->name, "attrib")) 1681 parm = G_PART_PARM_ATTRIB; 1682 break; 1683 case 'b': 1684 if (!strcmp(ap->name, "bootcode")) 1685 parm = G_PART_PARM_BOOTCODE; 1686 break; 1687 case 'c': 1688 if (!strcmp(ap->name, "class")) 1689 continue; 1690 break; 1691 case 'e': 1692 if (!strcmp(ap->name, "entries")) 1693 parm = G_PART_PARM_ENTRIES; 1694 break; 1695 case 'f': 1696 if (!strcmp(ap->name, "flags")) 1697 parm = G_PART_PARM_FLAGS; 1698 else if (!strcmp(ap->name, "force")) 1699 parm = G_PART_PARM_FORCE; 1700 break; 1701 case 'i': 1702 if (!strcmp(ap->name, "index")) 1703 parm = G_PART_PARM_INDEX; 1704 break; 1705 case 'l': 1706 if (!strcmp(ap->name, "label")) 1707 parm = G_PART_PARM_LABEL; 1708 break; 1709 case 'o': 1710 if (!strcmp(ap->name, "output")) 1711 parm = G_PART_PARM_OUTPUT; 1712 break; 1713 case 's': 
1714 if (!strcmp(ap->name, "scheme")) 1715 parm = G_PART_PARM_SCHEME; 1716 else if (!strcmp(ap->name, "size")) 1717 parm = G_PART_PARM_SIZE; 1718 else if (!strcmp(ap->name, "start")) 1719 parm = G_PART_PARM_START; 1720 break; 1721 case 't': 1722 if (!strcmp(ap->name, "type")) 1723 parm = G_PART_PARM_TYPE; 1724 break; 1725 case 'v': 1726 if (!strcmp(ap->name, "verb")) 1727 continue; 1728 else if (!strcmp(ap->name, "version")) 1729 parm = G_PART_PARM_VERSION; 1730 break; 1731 } 1732 if ((parm & (mparms | oparms)) == 0) { 1733 gctl_error(req, "%d param '%s'", EINVAL, ap->name); 1734 return; 1735 } 1736 switch (parm) { 1737 case G_PART_PARM_ATTRIB: 1738 error = g_part_parm_str(req, ap->name, 1739 &gpp.gpp_attrib); 1740 break; 1741 case G_PART_PARM_BOOTCODE: 1742 error = g_part_parm_bootcode(req, ap->name, 1743 &gpp.gpp_codeptr, &gpp.gpp_codesize); 1744 break; 1745 case G_PART_PARM_ENTRIES: 1746 error = g_part_parm_intmax(req, ap->name, 1747 &gpp.gpp_entries); 1748 break; 1749 case G_PART_PARM_FLAGS: 1750 error = g_part_parm_str(req, ap->name, &gpp.gpp_flags); 1751 break; 1752 case G_PART_PARM_FORCE: 1753 error = g_part_parm_uint32(req, ap->name, 1754 &gpp.gpp_force); 1755 break; 1756 case G_PART_PARM_GEOM: 1757 error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom); 1758 break; 1759 case G_PART_PARM_INDEX: 1760 error = g_part_parm_intmax(req, ap->name, 1761 &gpp.gpp_index); 1762 break; 1763 case G_PART_PARM_LABEL: 1764 error = g_part_parm_str(req, ap->name, &gpp.gpp_label); 1765 break; 1766 case G_PART_PARM_OUTPUT: 1767 error = 0; /* Write-only parameter */ 1768 break; 1769 case G_PART_PARM_PROVIDER: 1770 error = g_part_parm_provider(req, ap->name, 1771 &gpp.gpp_provider); 1772 break; 1773 case G_PART_PARM_SCHEME: 1774 error = g_part_parm_scheme(req, ap->name, 1775 &gpp.gpp_scheme); 1776 break; 1777 case G_PART_PARM_SIZE: 1778 error = g_part_parm_quad(req, ap->name, &gpp.gpp_size); 1779 break; 1780 case G_PART_PARM_START: 1781 error = g_part_parm_quad(req, ap->name, 
1782 &gpp.gpp_start); 1783 break; 1784 case G_PART_PARM_TYPE: 1785 error = g_part_parm_str(req, ap->name, &gpp.gpp_type); 1786 break; 1787 case G_PART_PARM_VERSION: 1788 error = g_part_parm_uint32(req, ap->name, 1789 &gpp.gpp_version); 1790 break; 1791 default: 1792 error = EDOOFUS; 1793 gctl_error(req, "%d %s", error, ap->name); 1794 break; 1795 } 1796 if (error != 0) { 1797 if (error == ENOATTR) { 1798 gctl_error(req, "%d param '%s'", error, 1799 ap->name); 1800 } 1801 return; 1802 } 1803 gpp.gpp_parms |= parm; 1804 } 1805 if ((gpp.gpp_parms & mparms) != mparms) { 1806 parm = mparms - (gpp.gpp_parms & mparms); 1807 gctl_error(req, "%d param '%x'", ENOATTR, parm); 1808 return; 1809 } 1810 1811 /* Obtain permissions if possible/necessary. */ 1812 close_on_error = 0; 1813 table = NULL; 1814 if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) { 1815 table = gpp.gpp_geom->softc; 1816 if (table != NULL && table->gpt_corrupt && 1817 ctlreq != G_PART_CTL_DESTROY && 1818 ctlreq != G_PART_CTL_RECOVER) { 1819 gctl_error(req, "%d table '%s' is corrupt", 1820 EPERM, gpp.gpp_geom->name); 1821 return; 1822 } 1823 if (table != NULL && !table->gpt_opened) { 1824 error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer), 1825 1, 1, 1); 1826 if (error) { 1827 gctl_error(req, "%d geom '%s'", error, 1828 gpp.gpp_geom->name); 1829 return; 1830 } 1831 table->gpt_opened = 1; 1832 close_on_error = 1; 1833 } 1834 } 1835 1836 /* Allow the scheme to check or modify the parameters. */ 1837 if (table != NULL) { 1838 error = G_PART_PRECHECK(table, ctlreq, &gpp); 1839 if (error) { 1840 gctl_error(req, "%d pre-check failed", error); 1841 goto out; 1842 } 1843 } else 1844 error = EDOOFUS; /* Prevent bogus uninit. warning. 
*/ 1845 1846 switch (ctlreq) { 1847 case G_PART_CTL_NONE: 1848 panic("%s", __func__); 1849 case G_PART_CTL_ADD: 1850 error = g_part_ctl_add(req, &gpp); 1851 break; 1852 case G_PART_CTL_BOOTCODE: 1853 error = g_part_ctl_bootcode(req, &gpp); 1854 break; 1855 case G_PART_CTL_COMMIT: 1856 error = g_part_ctl_commit(req, &gpp); 1857 break; 1858 case G_PART_CTL_CREATE: 1859 error = g_part_ctl_create(req, &gpp); 1860 break; 1861 case G_PART_CTL_DELETE: 1862 error = g_part_ctl_delete(req, &gpp); 1863 break; 1864 case G_PART_CTL_DESTROY: 1865 error = g_part_ctl_destroy(req, &gpp); 1866 break; 1867 case G_PART_CTL_MODIFY: 1868 error = g_part_ctl_modify(req, &gpp); 1869 break; 1870 case G_PART_CTL_MOVE: 1871 error = g_part_ctl_move(req, &gpp); 1872 break; 1873 case G_PART_CTL_RECOVER: 1874 error = g_part_ctl_recover(req, &gpp); 1875 break; 1876 case G_PART_CTL_RESIZE: 1877 error = g_part_ctl_resize(req, &gpp); 1878 break; 1879 case G_PART_CTL_SET: 1880 error = g_part_ctl_setunset(req, &gpp, 1); 1881 break; 1882 case G_PART_CTL_UNDO: 1883 error = g_part_ctl_undo(req, &gpp); 1884 break; 1885 case G_PART_CTL_UNSET: 1886 error = g_part_ctl_setunset(req, &gpp, 0); 1887 break; 1888 } 1889 1890 /* Implement automatic commit. */ 1891 if (!error) { 1892 auto_commit = (modifies && 1893 (gpp.gpp_parms & G_PART_PARM_FLAGS) && 1894 strchr(gpp.gpp_flags, 'C') != NULL) ? 
1 : 0; 1895 if (auto_commit) { 1896 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s", 1897 __func__)); 1898 error = g_part_ctl_commit(req, &gpp); 1899 } 1900 } 1901 1902 out: 1903 if (error && close_on_error) { 1904 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1); 1905 table->gpt_opened = 0; 1906 } 1907} 1908 1909static int 1910g_part_destroy_geom(struct gctl_req *req, struct g_class *mp, 1911 struct g_geom *gp) 1912{ 1913 1914 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name)); 1915 g_topology_assert(); 1916 1917 g_part_wither(gp, EINVAL); 1918 return (0); 1919} 1920 1921static struct g_geom * 1922g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 1923{ 1924 struct g_consumer *cp; 1925 struct g_geom *gp; 1926 struct g_part_entry *entry; 1927 struct g_part_table *table; 1928 struct root_hold_token *rht; 1929 int attr, depth; 1930 int error; 1931 1932 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name)); 1933 g_topology_assert(); 1934 1935 /* Skip providers that are already open for writing. */ 1936 if (pp->acw > 0) 1937 return (NULL); 1938 1939 /* 1940 * Create a GEOM with consumer and hook it up to the provider. 1941 * With that we become part of the topology. Optain read access 1942 * to the provider. 1943 */ 1944 gp = g_new_geomf(mp, "%s", pp->name); 1945 cp = g_new_consumer(gp); 1946 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 1947 error = g_attach(cp, pp); 1948 if (error == 0) 1949 error = g_access(cp, 1, 0, 0); 1950 if (error != 0) { 1951 if (cp->provider) 1952 g_detach(cp); 1953 g_destroy_consumer(cp); 1954 g_destroy_geom(gp); 1955 return (NULL); 1956 } 1957 1958 rht = root_mount_hold(mp->name); 1959 g_topology_unlock(); 1960 1961 /* 1962 * Short-circuit the whole probing galore when there's no 1963 * media present. 
1964 */ 1965 if (pp->mediasize == 0 || pp->sectorsize == 0) { 1966 error = ENODEV; 1967 goto fail; 1968 } 1969 1970 /* Make sure we can nest and if so, determine our depth. */ 1971 error = g_getattr("PART::isleaf", cp, &attr); 1972 if (!error && attr) { 1973 error = ENODEV; 1974 goto fail; 1975 } 1976 error = g_getattr("PART::depth", cp, &attr); 1977 depth = (!error) ? attr + 1 : 0; 1978 1979 error = g_part_probe(gp, cp, depth); 1980 if (error) 1981 goto fail; 1982 1983 table = gp->softc; 1984 1985 /* 1986 * Synthesize a disk geometry. Some partitioning schemes 1987 * depend on it and since some file systems need it even 1988 * when the partitition scheme doesn't, we do it here in 1989 * scheme-independent code. 1990 */ 1991 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize); 1992 1993 error = G_PART_READ(table, cp); 1994 if (error) 1995 goto fail; 1996 error = g_part_check_integrity(table, cp); 1997 if (error) 1998 goto fail; 1999 2000 g_topology_lock(); 2001 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) { 2002 if (!entry->gpe_internal) 2003 g_part_new_provider(gp, table, entry); 2004 } 2005 2006 root_mount_rel(rht); 2007 g_access(cp, -1, 0, 0); 2008 return (gp); 2009 2010 fail: 2011 g_topology_lock(); 2012 root_mount_rel(rht); 2013 g_access(cp, -1, 0, 0); 2014 g_detach(cp); 2015 g_destroy_consumer(cp); 2016 g_destroy_geom(gp); 2017 return (NULL); 2018} 2019 2020/* 2021 * Geom methods. 2022 */ 2023 2024static int 2025g_part_access(struct g_provider *pp, int dr, int dw, int de) 2026{ 2027 struct g_consumer *cp; 2028 2029 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr, 2030 dw, de)); 2031 2032 cp = LIST_FIRST(&pp->geom->consumer); 2033 2034 /* We always gain write-exclusive access. 
*/ 2035 return (g_access(cp, dr, dw, dw + de)); 2036} 2037 2038static void 2039g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 2040 struct g_consumer *cp, struct g_provider *pp) 2041{ 2042 char buf[64]; 2043 struct g_part_entry *entry; 2044 struct g_part_table *table; 2045 2046 KASSERT(sb != NULL && gp != NULL, ("%s", __func__)); 2047 table = gp->softc; 2048 2049 if (indent == NULL) { 2050 KASSERT(cp == NULL && pp != NULL, ("%s", __func__)); 2051 entry = pp->private; 2052 if (entry == NULL) 2053 return; 2054 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index, 2055 (uintmax_t)entry->gpe_offset, 2056 G_PART_TYPE(table, entry, buf, sizeof(buf))); 2057 /* 2058 * libdisk compatibility quirk - the scheme dumps the 2059 * slicer name and partition type in a way that is 2060 * compatible with libdisk. When libdisk is not used 2061 * anymore, this should go away. 2062 */ 2063 G_PART_DUMPCONF(table, entry, sb, indent); 2064 } else if (cp != NULL) { /* Consumer configuration. */ 2065 KASSERT(pp == NULL, ("%s", __func__)); 2066 /* none */ 2067 } else if (pp != NULL) { /* Provider configuration. */ 2068 entry = pp->private; 2069 if (entry == NULL) 2070 return; 2071 sbuf_printf(sb, "%s<start>%ju</start>\n", indent, 2072 (uintmax_t)entry->gpe_start); 2073 sbuf_printf(sb, "%s<end>%ju</end>\n", indent, 2074 (uintmax_t)entry->gpe_end); 2075 sbuf_printf(sb, "%s<index>%u</index>\n", indent, 2076 entry->gpe_index); 2077 sbuf_printf(sb, "%s<type>%s</type>\n", indent, 2078 G_PART_TYPE(table, entry, buf, sizeof(buf))); 2079 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent, 2080 (uintmax_t)entry->gpe_offset); 2081 sbuf_printf(sb, "%s<length>%ju</length>\n", indent, 2082 (uintmax_t)pp->mediasize); 2083 G_PART_DUMPCONF(table, entry, sb, indent); 2084 } else { /* Geom configuration. 
*/ 2085 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent, 2086 table->gpt_scheme->name); 2087 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent, 2088 table->gpt_entries); 2089 sbuf_printf(sb, "%s<first>%ju</first>\n", indent, 2090 (uintmax_t)table->gpt_first); 2091 sbuf_printf(sb, "%s<last>%ju</last>\n", indent, 2092 (uintmax_t)table->gpt_last); 2093 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent, 2094 table->gpt_sectors); 2095 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent, 2096 table->gpt_heads); 2097 sbuf_printf(sb, "%s<state>%s</state>\n", indent, 2098 table->gpt_corrupt ? "CORRUPT": "OK"); 2099 sbuf_printf(sb, "%s<modified>%s</modified>\n", indent, 2100 table->gpt_opened ? "true": "false"); 2101 G_PART_DUMPCONF(table, NULL, sb, indent); 2102 } 2103} 2104 2105/*- 2106 * This start routine is only called for non-trivial requests, all the 2107 * trivial ones are handled autonomously by the slice code. 2108 * For requests we handle here, we must call the g_io_deliver() on the 2109 * bio, and return non-zero to indicate to the slice code that we did so. 2110 * This code executes in the "DOWN" I/O path, this means: 2111 * * No sleeping. 2112 * * Don't grab the topology lock. 
2113 * * Don't call biowait, g_getattr(), g_setattr() or g_read_data() 2114 */ 2115static int 2116g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td) 2117{ 2118 struct g_part_table *table; 2119 2120 table = pp->geom->softc; 2121 return G_PART_IOCTL(table, pp, cmd, data, fflag, td); 2122} 2123 2124static void 2125g_part_resize(struct g_consumer *cp) 2126{ 2127 struct g_part_table *table; 2128 2129 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2130 g_topology_assert(); 2131 2132 table = cp->geom->softc; 2133 if (table->gpt_opened == 0) { 2134 if (g_access(cp, 1, 1, 1) != 0) 2135 return; 2136 table->gpt_opened = 1; 2137 } 2138 if (G_PART_RESIZE(table, NULL, NULL) == 0) 2139 printf("GEOM_PART: %s was automatically resized.\n" 2140 " Use `gpart commit %s` to save changes or " 2141 "`gpart undo %s` to revert them.\n", cp->geom->name, 2142 cp->geom->name, cp->geom->name); 2143 if (g_part_check_integrity(table, cp) != 0) { 2144 g_access(cp, -1, -1, -1); 2145 table->gpt_opened = 0; 2146 g_part_wither(table->gpt_gp, ENXIO); 2147 } 2148} 2149 2150static void 2151g_part_orphan(struct g_consumer *cp) 2152{ 2153 struct g_provider *pp; 2154 struct g_part_table *table; 2155 2156 pp = cp->provider; 2157 KASSERT(pp != NULL, ("%s", __func__)); 2158 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name)); 2159 g_topology_assert(); 2160 2161 KASSERT(pp->error != 0, ("%s", __func__)); 2162 table = cp->geom->softc; 2163 if (table != NULL && table->gpt_opened) 2164 g_access(cp, -1, -1, -1); 2165 g_part_wither(cp->geom, pp->error); 2166} 2167 2168static void 2169g_part_spoiled(struct g_consumer *cp) 2170{ 2171 2172 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name)); 2173 g_topology_assert(); 2174 2175 cp->flags |= G_CF_ORPHAN; 2176 g_part_wither(cp->geom, ENXIO); 2177} 2178 2179static void 2180g_part_start(struct bio *bp) 2181{ 2182 struct bio *bp2; 2183 struct g_consumer *cp; 2184 struct g_geom *gp; 2185 
struct g_part_entry *entry; 2186 struct g_part_table *table; 2187 struct g_kerneldump *gkd; 2188 struct g_provider *pp; 2189 void (*done_func)(struct bio *) = g_std_done; 2190 char buf[64]; 2191 2192 pp = bp->bio_to; 2193 gp = pp->geom; 2194 table = gp->softc; 2195 cp = LIST_FIRST(&gp->consumer); 2196 2197 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd, 2198 pp->name)); 2199 2200 entry = pp->private; 2201 if (entry == NULL) { 2202 g_io_deliver(bp, ENXIO); 2203 return; 2204 } 2205 2206 switch(bp->bio_cmd) { 2207 case BIO_DELETE: 2208 case BIO_READ: 2209 case BIO_WRITE: 2210 if (bp->bio_offset >= pp->mediasize) { 2211 g_io_deliver(bp, EIO); 2212 return; 2213 } 2214 bp2 = g_clone_bio(bp); 2215 if (bp2 == NULL) { 2216 g_io_deliver(bp, ENOMEM); 2217 return; 2218 } 2219 if (bp2->bio_offset + bp2->bio_length > pp->mediasize) 2220 bp2->bio_length = pp->mediasize - bp2->bio_offset; 2221 bp2->bio_done = g_std_done; 2222 bp2->bio_offset += entry->gpe_offset; 2223 g_io_request(bp2, cp); 2224 return; 2225 case BIO_FLUSH: 2226 break; 2227 case BIO_GETATTR: 2228 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads)) 2229 return; 2230 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors)) 2231 return; 2232 if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf)) 2233 return; 2234 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth)) 2235 return; 2236 if (g_handleattr_str(bp, "PART::scheme", 2237 table->gpt_scheme->name)) 2238 return; 2239 if (g_handleattr_str(bp, "PART::type", 2240 G_PART_TYPE(table, entry, buf, sizeof(buf)))) 2241 return; 2242 if (!strcmp("GEOM::physpath", bp->bio_attribute)) { 2243 done_func = g_part_get_physpath_done; 2244 break; 2245 } 2246 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) { 2247 /* 2248 * Check that the partition is suitable for kernel 2249 * dumps. Typically only swap partitions should be 2250 * used. If the request comes from the nested scheme 2251 * we allow dumping there as well. 
2252 */ 2253 if ((bp->bio_from == NULL || 2254 bp->bio_from->geom->class != &g_part_class) && 2255 G_PART_DUMPTO(table, entry) == 0) { 2256 g_io_deliver(bp, ENODEV); 2257 printf("GEOM_PART: Partition '%s' not suitable" 2258 " for kernel dumps (wrong type?)\n", 2259 pp->name); 2260 return; 2261 } 2262 gkd = (struct g_kerneldump *)bp->bio_data; 2263 if (gkd->offset >= pp->mediasize) { 2264 g_io_deliver(bp, EIO); 2265 return; 2266 } 2267 if (gkd->offset + gkd->length > pp->mediasize) 2268 gkd->length = pp->mediasize - gkd->offset; 2269 gkd->offset += entry->gpe_offset; 2270 } 2271 break; 2272 default: 2273 g_io_deliver(bp, EOPNOTSUPP); 2274 return; 2275 } 2276 2277 bp2 = g_clone_bio(bp); 2278 if (bp2 == NULL) { 2279 g_io_deliver(bp, ENOMEM); 2280 return; 2281 } 2282 bp2->bio_done = done_func; 2283 g_io_request(bp2, cp); 2284} 2285 2286static void 2287g_part_init(struct g_class *mp) 2288{ 2289 2290 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list); 2291} 2292 2293static void 2294g_part_fini(struct g_class *mp) 2295{ 2296 2297 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list); 2298} 2299 2300static void 2301g_part_unload_event(void *arg, int flag) 2302{ 2303 struct g_consumer *cp; 2304 struct g_geom *gp; 2305 struct g_provider *pp; 2306 struct g_part_scheme *scheme; 2307 struct g_part_table *table; 2308 uintptr_t *xchg; 2309 int acc, error; 2310 2311 if (flag == EV_CANCEL) 2312 return; 2313 2314 xchg = arg; 2315 error = 0; 2316 scheme = (void *)(*xchg); 2317 2318 g_topology_assert(); 2319 2320 LIST_FOREACH(gp, &g_part_class.geom, geom) { 2321 table = gp->softc; 2322 if (table->gpt_scheme != scheme) 2323 continue; 2324 2325 acc = 0; 2326 LIST_FOREACH(pp, &gp->provider, provider) 2327 acc += pp->acr + pp->acw + pp->ace; 2328 LIST_FOREACH(cp, &gp->consumer, consumer) 2329 acc += cp->acr + cp->acw + cp->ace; 2330 2331 if (!acc) 2332 g_part_wither(gp, ENOSYS); 2333 else 2334 error = EBUSY; 2335 } 2336 2337 if (!error) 2338 
TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list); 2339 2340 *xchg = error; 2341} 2342 2343int 2344g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme) 2345{ 2346 struct g_part_scheme *iter; 2347 uintptr_t arg; 2348 int error; 2349 2350 error = 0; 2351 switch (type) { 2352 case MOD_LOAD: 2353 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) { 2354 if (scheme == iter) { 2355 printf("GEOM_PART: scheme %s is already " 2356 "registered!\n", scheme->name); 2357 break; 2358 } 2359 } 2360 if (iter == NULL) { 2361 TAILQ_INSERT_TAIL(&g_part_schemes, scheme, 2362 scheme_list); 2363 g_retaste(&g_part_class); 2364 } 2365 break; 2366 case MOD_UNLOAD: 2367 arg = (uintptr_t)scheme; 2368 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK, 2369 NULL); 2370 if (error == 0) 2371 error = arg; 2372 break; 2373 default: 2374 error = EOPNOTSUPP; 2375 break; 2376 } 2377 2378 return (error); 2379} 2380