geom_subr.c revision 238534
1223328Sgavin/*- 2223328Sgavin * Copyright (c) 2002 Poul-Henning Kamp 379971Sobrien * Copyright (c) 2002 Networks Associates Technology, Inc. 479971Sobrien * All rights reserved. 5223328Sgavin * 679971Sobrien * This software was developed for the FreeBSD Project by Poul-Henning Kamp 779971Sobrien * and NAI Labs, the Security Research Division of Network Associates, Inc. 879971Sobrien * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the 979971Sobrien * DARPA CHATS research program. 1079971Sobrien * 1179971Sobrien * Redistribution and use in source and binary forms, with or without 1279971Sobrien * modification, are permitted provided that the following conditions 1379971Sobrien * are met: 1479971Sobrien * 1. Redistributions of source code must retain the above copyright 1579971Sobrien * notice, this list of conditions and the following disclaimer. 1679971Sobrien * 2. Redistributions in binary form must reproduce the above copyright 1779971Sobrien * notice, this list of conditions and the following disclaimer in the 1879971Sobrien * documentation and/or other materials provided with the distribution. 1979971Sobrien * 3. The names of the authors may not be used to endorse or promote 2079971Sobrien * products derived from this software without specific prior written 2179971Sobrien * permission. 2279971Sobrien * 2379971Sobrien * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 2479971Sobrien * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 2579971Sobrien * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 2679971Sobrien * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 2779971Sobrien * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 2879971Sobrien * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2979971Sobrien * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 3079971Sobrien * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 3179971Sobrien * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 3279971Sobrien * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 3379971Sobrien * SUCH DAMAGE. 3479971Sobrien */ 3579971Sobrien 3679971Sobrien#include <sys/cdefs.h> 3779971Sobrien__FBSDID("$FreeBSD: head/sys/geom/geom_subr.c 238534 2012-07-16 17:41:38Z trasz $"); 3879971Sobrien 3979971Sobrien#include "opt_ddb.h" 4079971Sobrien 4179971Sobrien#include <sys/param.h> 4279971Sobrien#include <sys/systm.h> 4379971Sobrien#include <sys/devicestat.h> 4479971Sobrien#include <sys/kernel.h> 45121966Smikeh#include <sys/malloc.h> 4679971Sobrien#include <sys/bio.h> 4779971Sobrien#include <sys/sysctl.h> 4879971Sobrien#include <sys/proc.h> 4979971Sobrien#include <sys/kthread.h> 5079971Sobrien#include <sys/lock.h> 5179971Sobrien#include <sys/mutex.h> 5279971Sobrien#include <sys/errno.h> 5379971Sobrien#include <sys/sbuf.h> 5479971Sobrien#include <geom/geom.h> 5579971Sobrien#include <geom/geom_int.h> 5679971Sobrien#include <machine/stdarg.h> 5779971Sobrien 5879971Sobrien#ifdef DDB 5979971Sobrien#include <ddb/ddb.h> 6079971Sobrien#endif 6179971Sobrien 6279971Sobrien#ifdef KDB 6379971Sobrien#include <sys/kdb.h> 6479971Sobrien#endif 6579971Sobrien 6679971Sobrienstruct class_list_head g_classes = LIST_HEAD_INITIALIZER(g_classes); 67146309Smikehstatic struct g_tailq_head geoms = TAILQ_HEAD_INITIALIZER(geoms); 6879971Sobrienchar *g_wait_event, *g_wait_up, *g_wait_down, *g_wait_sim; 6979971Sobrien 7079971Sobrienstruct g_hh00 { 7179971Sobrien struct g_class *mp; 
        struct g_provider *pp;  /* provider being resized (resize events) */
        off_t size;             /* new mediasize (resize events) */
        int error;              /* status reported back to a waiting poster */
        int post;               /* set when poster does not wait; handler owns hh */
};

/*
 * This event offers a new class a chance to taste all preexisting providers.
 *
 * Runs with the topology lock held (g_topology_assert below).  The class is
 * rejected with EEXIST if the same class pointer, or another class with the
 * same name, is already on g_classes.  If the class has a taste method, every
 * provider of every other class is offered to it.
 */
static void
g_load_class(void *arg, int flag)
{
        struct g_hh00 *hh;
        struct g_class *mp2, *mp;
        struct g_geom *gp;
        struct g_provider *pp;

        g_topology_assert();
        if (flag == EV_CANCEL)  /* XXX: can't happen ? */
                return;
        if (g_shutdown)
                return;

        hh = arg;
        mp = hh->mp;
        hh->error = 0;
        /* Posted (fire-and-forget) requests: nobody waits, so free hh now. */
        if (hh->post) {
                g_free(hh);
                hh = NULL;
        }
        g_trace(G_T_TOPOLOGY, "g_load_class(%s)", mp->name);
        KASSERT(mp->name != NULL && *mp->name != '\0',
            ("GEOM class has no name"));
        LIST_FOREACH(mp2, &g_classes, class) {
                if (mp2 == mp) {
                        printf("The GEOM class %s is already loaded.\n",
                            mp2->name);
                        if (hh != NULL)
                                hh->error = EEXIST;
                        return;
                } else if (strcmp(mp2->name, mp->name) == 0) {
                        printf("A GEOM class %s is already loaded.\n",
                            mp2->name);
                        if (hh != NULL)
                                hh->error = EEXIST;
                        return;
                }
        }

        LIST_INIT(&mp->geom);
        LIST_INSERT_HEAD(&g_classes, mp, class);
        if (mp->init != NULL)
                mp->init(mp);
        if (mp->taste == NULL)
                return;
        /* Offer every existing provider (of the other classes) for tasting. */
        LIST_FOREACH(mp2, &g_classes, class) {
                if (mp == mp2)
                        continue;
                LIST_FOREACH(gp, &mp2->geom, geom) {
                        LIST_FOREACH(pp, &gp->provider, provider) {
                                mp->taste(mp, pp, 0);
                                g_topology_assert();
                        }
                }
        }
}

/*
 * Unload a GEOM class.
 *
 * Refuses with EBUSY while any provider or consumer of the class is open,
 * and with EOPNOTSUPP if geoms exist but the class has no destroy_geom
 * method.  Otherwise bars new activity (taste/config), asks the class to
 * destroy each geom, waits for withering to complete, runs the class fini
 * hook and unlinks the class.  Returns 0 on success.
 */
static int
g_unload_class(struct g_class *mp)
{
        struct g_geom *gp;
        struct g_provider *pp;
        struct g_consumer *cp;
        int error;

        g_topology_lock();
        g_trace(G_T_TOPOLOGY, "g_unload_class(%s)", mp->name);
retry:
        G_VALID_CLASS(mp);
        LIST_FOREACH(gp, &mp->geom, geom) {
                /* We refuse to unload if anything is open */
                LIST_FOREACH(pp, &gp->provider, provider)
                        if (pp->acr || pp->acw || pp->ace) {
                                g_topology_unlock();
                                return (EBUSY);
                        }
                LIST_FOREACH(cp, &gp->consumer, consumer)
                        if (cp->acr || cp->acw || cp->ace) {
                                g_topology_unlock();
                                return (EBUSY);
                        }
                /* If the geom is withering, wait for it to finish. */
                if (gp->flags & G_GEOM_WITHER) {
                        g_topology_sleep(mp, 1);
                        /* Lists may have changed while we slept: rescan. */
                        goto retry;
                }
        }

        /*
         * We allow unloading if we have no geoms, or a class
         * method we can use to get rid of them.
         */
        if (!LIST_EMPTY(&mp->geom) && mp->destroy_geom == NULL) {
                g_topology_unlock();
                return (EOPNOTSUPP);
        }

        /* Bar new entries */
        mp->taste = NULL;
        mp->config = NULL;

        LIST_FOREACH(gp, &mp->geom, geom) {
                error = mp->destroy_geom(NULL, mp, gp);
                if (error != 0) {
                        g_topology_unlock();
                        return (error);
                }
        }
        /* Wait for withering to finish. */
        for (;;) {
                gp = LIST_FIRST(&mp->geom);
                if (gp == NULL)
                        break;
                KASSERT(gp->flags & G_GEOM_WITHER,
                   ("Non-withering geom in class %s", mp->name));
                g_topology_sleep(mp, 1);
        }
        G_VALID_CLASS(mp);
        if (mp->fini != NULL)
                mp->fini(mp);
        LIST_REMOVE(mp, class);
        g_topology_unlock();

        return (0);
}

/*
 * Kernel module event handler for GEOM classes.
 *
 * Validates the class ABI version, performs one-time GEOM initialization,
 * and dispatches MOD_LOAD / MOD_UNLOAD.  Other event types return
 * EOPNOTSUPP (the pre-set default).
 */
int
g_modevent(module_t mod, int type, void *data)
{
        struct g_hh00 *hh;
        int error;
        static int g_ignition;  /* one-shot guard for g_init() */
        struct g_class *mp;

        mp = data;
        if (mp->version != G_VERSION) {
                printf("GEOM class %s has Wrong version %x\n",
                    mp->name, mp->version);
                return (EINVAL);
        }
        if (!g_ignition) {
                g_ignition++;
                g_init();
        }
        error = EOPNOTSUPP;
        switch (type) {
        case MOD_LOAD:
                g_trace(G_T_TOPOLOGY, "g_modevent(%s, LOAD)", mp->name);
                hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO);
                hh->mp = mp;
                /*
                 * Once the system is not cold, MOD_LOAD calls will be
                 * from the userland and the g_event thread will be able
                 * to acknowledge their completion.
                 */
                if (cold) {
                        hh->post = 1;
                        error = g_post_event(g_load_class, hh, M_WAITOK, NULL);
                } else {
                        error = g_waitfor_event(g_load_class, hh, M_WAITOK,
                            NULL);
                        if (error == 0)
                                error = hh->error;
                        g_free(hh);
                }
                break;
        case MOD_UNLOAD:
                g_trace(G_T_TOPOLOGY, "g_modevent(%s, UNLOAD)", mp->name);
                DROP_GIANT();
                error = g_unload_class(mp);
                PICKUP_GIANT();
                if (error == 0) {
                        KASSERT(LIST_EMPTY(&mp->geom),
                            ("Unloaded class (%s) still has geom", mp->name));
                }
                break;
        }
        return (error);
}

/*
 * Event handler for g_retaste(): offer every unopened provider in the
 * mesh to class mp again.  If mp already has a geom with the provider's
 * name, that geom is withered first so the class can re-create it.
 */
static void
g_retaste_event(void *arg, int flag)
{
        struct g_class *cp, *mp;
        struct g_geom *gp, *gp2;
        struct g_hh00 *hh;
        struct g_provider *pp;

        g_topology_assert();
        if (flag == EV_CANCEL)  /* XXX: can't happen ? */
                return;
        if (g_shutdown)
                return;

        hh = arg;
        mp = hh->mp;
        hh->error = 0;
        /* Posted requests: nobody waits, so free hh now. */
        if (hh->post) {
                g_free(hh);
                hh = NULL;
        }
        g_trace(G_T_TOPOLOGY, "g_retaste(%s)", mp->name);

        LIST_FOREACH(cp, &g_classes, class) {
                LIST_FOREACH(gp, &cp->geom, geom) {
                        LIST_FOREACH(pp, &gp->provider, provider) {
                                /* Skip open providers. */
                                if (pp->acr || pp->acw || pp->ace)
                                        continue;
                                LIST_FOREACH(gp2, &mp->geom, geom) {
                                        if (!strcmp(pp->name, gp2->name))
                                                break;
                                }
                                if (gp2 != NULL)
                                        g_wither_geom(gp2, ENXIO);
                                mp->taste(mp, pp, 0);
                                g_topology_assert();
                        }
                }
        }
}

/*
 * Request a re-taste of all providers by class mp.  Returns EINVAL if the
 * class has no taste method; otherwise posts (when cold) or posts-and-waits
 * for g_retaste_event and returns its status.
 */
int
g_retaste(struct g_class *mp)
{
        struct g_hh00 *hh;
        int error;

        if (mp->taste == NULL)
                return (EINVAL);

        hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO);
        hh->mp = mp;

        if (cold) {
                hh->post = 1;
                error = g_post_event(g_retaste_event, hh, M_WAITOK, NULL);
        } else {
                error = g_waitfor_event(g_retaste_event, hh, M_WAITOK, NULL);
                if (error == 0)
                        error = hh->error;
                g_free(hh);
        }

        return (error);
}

/*
 * Allocate and initialize a new geom in class mp, named by the printf-style
 * format.  The geom starts at rank 1, is linked into both the class list and
 * the global topologically-sorted "geoms" list, and inherits its method
 * vector defaults from the class.  Never fails (M_WAITOK allocations).
 */
struct g_geom *
g_new_geomf(struct g_class *mp, const char *fmt, ...)
{
        struct g_geom *gp;
        va_list ap;
        struct sbuf *sb;

        g_topology_assert();
        G_VALID_CLASS(mp);
        sb = sbuf_new_auto();
        va_start(ap, fmt);
        sbuf_vprintf(sb, fmt, ap);
        va_end(ap);
        sbuf_finish(sb);
        gp = g_malloc(sizeof *gp, M_WAITOK | M_ZERO);
        gp->name = g_malloc(sbuf_len(sb) + 1, M_WAITOK | M_ZERO);
        gp->class = mp;
        gp->rank = 1;
        LIST_INIT(&gp->consumer);
        LIST_INIT(&gp->provider);
        LIST_INSERT_HEAD(&mp->geom, gp, geom);
        TAILQ_INSERT_HEAD(&geoms, gp, geoms);
        strcpy(gp->name, sbuf_data(sb));
        sbuf_delete(sb);
        /* Fill in defaults from class */
        gp->start = mp->start;
        gp->spoiled = mp->spoiled;
        gp->attrchanged = mp->attrchanged;
        gp->providergone = mp->providergone;
        gp->dumpconf = mp->dumpconf;
        gp->access = mp->access;
        gp->orphan = mp->orphan;
        gp->ioctl = mp->ioctl;
        gp->resize = mp->resize;
        return (gp);
}

/*
 * Destroy a geom.  The caller must already have detached and destroyed all
 * of its consumers and providers (asserted below).
 */
void
g_destroy_geom(struct g_geom *gp)
{

        g_topology_assert();
        G_VALID_GEOM(gp);
        g_trace(G_T_TOPOLOGY, "g_destroy_geom(%p(%s))", gp, gp->name);
        KASSERT(LIST_EMPTY(&gp->consumer),
            ("g_destroy_geom(%s) with consumer(s) [%p]",
            gp->name, LIST_FIRST(&gp->consumer)));
        KASSERT(LIST_EMPTY(&gp->provider),
            ("g_destroy_geom(%s) with provider(s) [%p]",
            gp->name, LIST_FIRST(&gp->provider)));
        /* Drop any pending events referencing this geom. */
        g_cancel_event(gp);
        LIST_REMOVE(gp, geom);
        TAILQ_REMOVE(&geoms, gp, geoms);
        g_free(gp->name);
        g_free(gp);
}

/*
 * This function is called (repeatedly) until the geom has withered away.
 */
void
g_wither_geom(struct g_geom *gp, int error)
{
        struct g_provider *pp;

        g_topology_assert();
        G_VALID_GEOM(gp);
        g_trace(G_T_TOPOLOGY, "g_wither_geom(%p(%s))", gp, gp->name);
        /* Only the first call marks the geom and orphans its providers. */
        if (!(gp->flags & G_GEOM_WITHER)) {
                gp->flags |= G_GEOM_WITHER;
                LIST_FOREACH(pp, &gp->provider, provider)
                        if (!(pp->flags & G_PF_ORPHAN))
                                g_orphan_provider(pp, error);
        }
        /* Schedule the wither-washer to clean up whatever it can. */
        g_do_wither();
}

/*
 * Convenience function to destroy a particular provider.
 */
void
g_wither_provider(struct g_provider *pp, int error)
{

        pp->flags |= G_PF_WITHER;
        if (!(pp->flags & G_PF_ORPHAN))
                g_orphan_provider(pp, error);
}

/*
 * This function is called (repeatedly) until the geom has withered away.
 * In addition to withering, it closes any consumer that is still open.
 */
void
g_wither_geom_close(struct g_geom *gp, int error)
{
        struct g_consumer *cp;

        g_topology_assert();
        G_VALID_GEOM(gp);
        g_trace(G_T_TOPOLOGY, "g_wither_geom_close(%p(%s))", gp, gp->name);
        /* Release every access count this geom's consumers still hold. */
        LIST_FOREACH(cp, &gp->consumer, consumer)
                if (cp->acr || cp->acw || cp->ace)
                        g_access(cp, -cp->acr, -cp->acw, -cp->ace);
        g_wither_geom(gp, error);
}

/*
 * This function is called (repeatedly) until we can't wash away more
 * withered bits at present.  Return value contains two bits.  Bit 0
 * set means "withering stuff we can't wash now", bit 1 means "call
 * me again, there may be stuff I didn't get the first time around."
439 */ 440int 441g_wither_washer() 442{ 443 struct g_class *mp; 444 struct g_geom *gp, *gp2; 445 struct g_provider *pp, *pp2; 446 struct g_consumer *cp, *cp2; 447 int result; 448 449 result = 0; 450 g_topology_assert(); 451 LIST_FOREACH(mp, &g_classes, class) { 452 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 453 LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) { 454 if (!(pp->flags & G_PF_WITHER)) 455 continue; 456 if (LIST_EMPTY(&pp->consumers)) 457 g_destroy_provider(pp); 458 else 459 result |= 1; 460 } 461 if (!(gp->flags & G_GEOM_WITHER)) 462 continue; 463 LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) { 464 if (LIST_EMPTY(&pp->consumers)) 465 g_destroy_provider(pp); 466 else 467 result |= 1; 468 } 469 LIST_FOREACH_SAFE(cp, &gp->consumer, consumer, cp2) { 470 if (cp->acr || cp->acw || cp->ace) { 471 result |= 1; 472 continue; 473 } 474 if (cp->provider != NULL) 475 g_detach(cp); 476 g_destroy_consumer(cp); 477 result |= 2; 478 } 479 if (LIST_EMPTY(&gp->provider) && 480 LIST_EMPTY(&gp->consumer)) 481 g_destroy_geom(gp); 482 else 483 result |= 1; 484 } 485 } 486 return (result); 487} 488 489struct g_consumer * 490g_new_consumer(struct g_geom *gp) 491{ 492 struct g_consumer *cp; 493 494 g_topology_assert(); 495 G_VALID_GEOM(gp); 496 KASSERT(!(gp->flags & G_GEOM_WITHER), 497 ("g_new_consumer on WITHERing geom(%s) (class %s)", 498 gp->name, gp->class->name)); 499 KASSERT(gp->orphan != NULL, 500 ("g_new_consumer on geom(%s) (class %s) without orphan", 501 gp->name, gp->class->name)); 502 503 cp = g_malloc(sizeof *cp, M_WAITOK | M_ZERO); 504 cp->geom = gp; 505 cp->stat = devstat_new_entry(cp, -1, 0, DEVSTAT_ALL_SUPPORTED, 506 DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); 507 LIST_INSERT_HEAD(&gp->consumer, cp, consumer); 508 return(cp); 509} 510 511void 512g_destroy_consumer(struct g_consumer *cp) 513{ 514 struct g_geom *gp; 515 516 g_topology_assert(); 517 G_VALID_CONSUMER(cp); 518 g_trace(G_T_TOPOLOGY, "g_destroy_consumer(%p)", cp); 519 KASSERT (cp->provider 
== NULL, ("g_destroy_consumer but attached")); 520 KASSERT (cp->acr == 0, ("g_destroy_consumer with acr")); 521 KASSERT (cp->acw == 0, ("g_destroy_consumer with acw")); 522 KASSERT (cp->ace == 0, ("g_destroy_consumer with ace")); 523 g_cancel_event(cp); 524 gp = cp->geom; 525 LIST_REMOVE(cp, consumer); 526 devstat_remove_entry(cp->stat); 527 g_free(cp); 528 if (gp->flags & G_GEOM_WITHER) 529 g_do_wither(); 530} 531 532static void 533g_new_provider_event(void *arg, int flag) 534{ 535 struct g_class *mp; 536 struct g_provider *pp; 537 struct g_consumer *cp; 538 539 g_topology_assert(); 540 if (flag == EV_CANCEL) 541 return; 542 if (g_shutdown) 543 return; 544 pp = arg; 545 G_VALID_PROVIDER(pp); 546 KASSERT(!(pp->flags & G_PF_WITHER), 547 ("g_new_provider_event but withered")); 548 LIST_FOREACH(mp, &g_classes, class) { 549 if (mp->taste == NULL) 550 continue; 551 LIST_FOREACH(cp, &pp->consumers, consumers) 552 if (cp->geom->class == mp) 553 break; 554 if (cp != NULL) 555 continue; 556 mp->taste(mp, pp, 0); 557 g_topology_assert(); 558 } 559} 560 561 562struct g_provider * 563g_new_providerf(struct g_geom *gp, const char *fmt, ...) 
564{ 565 struct g_provider *pp; 566 struct sbuf *sb; 567 va_list ap; 568 569 g_topology_assert(); 570 G_VALID_GEOM(gp); 571 KASSERT(gp->access != NULL, 572 ("new provider on geom(%s) without ->access (class %s)", 573 gp->name, gp->class->name)); 574 KASSERT(gp->start != NULL, 575 ("new provider on geom(%s) without ->start (class %s)", 576 gp->name, gp->class->name)); 577 KASSERT(!(gp->flags & G_GEOM_WITHER), 578 ("new provider on WITHERing geom(%s) (class %s)", 579 gp->name, gp->class->name)); 580 sb = sbuf_new_auto(); 581 va_start(ap, fmt); 582 sbuf_vprintf(sb, fmt, ap); 583 va_end(ap); 584 sbuf_finish(sb); 585 pp = g_malloc(sizeof *pp + sbuf_len(sb) + 1, M_WAITOK | M_ZERO); 586 pp->name = (char *)(pp + 1); 587 strcpy(pp->name, sbuf_data(sb)); 588 sbuf_delete(sb); 589 LIST_INIT(&pp->consumers); 590 pp->error = ENXIO; 591 pp->geom = gp; 592 pp->stat = devstat_new_entry(pp, -1, 0, DEVSTAT_ALL_SUPPORTED, 593 DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); 594 LIST_INSERT_HEAD(&gp->provider, pp, provider); 595 g_post_event(g_new_provider_event, pp, M_WAITOK, pp, gp, NULL); 596 return (pp); 597} 598 599void 600g_error_provider(struct g_provider *pp, int error) 601{ 602 603 /* G_VALID_PROVIDER(pp); We may not have g_topology */ 604 pp->error = error; 605} 606 607static void 608g_resize_provider_event(void *arg, int flag) 609{ 610 struct g_hh00 *hh; 611 struct g_class *mp; 612 struct g_geom *gp; 613 struct g_provider *pp; 614 struct g_consumer *cp, *cp2; 615 off_t size; 616 617 g_topology_assert(); 618 if (g_shutdown) 619 return; 620 621 hh = arg; 622 pp = hh->pp; 623 size = hh->size; 624 625 G_VALID_PROVIDER(pp); 626 g_trace(G_T_TOPOLOGY, "g_resize_provider_event(%p)", pp); 627 628 LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) { 629 gp = cp->geom; 630 if (gp->resize == NULL && size < pp->mediasize) 631 cp->geom->orphan(cp); 632 } 633 634 pp->mediasize = size; 635 636 LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) { 637 gp = cp->geom; 638 if (gp->resize != 
NULL) 639 gp->resize(cp); 640 } 641 642 /* 643 * After resizing, the previously invalid GEOM class metadata 644 * might become valid. This means we should retaste. 645 */ 646 LIST_FOREACH(mp, &g_classes, class) { 647 if (mp->taste == NULL) 648 continue; 649 LIST_FOREACH(cp, &pp->consumers, consumers) 650 if (cp->geom->class == mp) 651 break; 652 if (cp != NULL) 653 continue; 654 mp->taste(mp, pp, 0); 655 g_topology_assert(); 656 } 657} 658 659void 660g_resize_provider(struct g_provider *pp, off_t size) 661{ 662 struct g_hh00 *hh; 663 664 G_VALID_PROVIDER(pp); 665 666 if (size == pp->mediasize) 667 return; 668 669 hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); 670 hh->pp = pp; 671 hh->size = size; 672 g_post_event(g_resize_provider_event, hh, M_WAITOK, NULL); 673} 674 675struct g_provider * 676g_provider_by_name(char const *arg) 677{ 678 struct g_class *cp; 679 struct g_geom *gp; 680 struct g_provider *pp; 681 682 LIST_FOREACH(cp, &g_classes, class) { 683 LIST_FOREACH(gp, &cp->geom, geom) { 684 LIST_FOREACH(pp, &gp->provider, provider) { 685 if (!strcmp(arg, pp->name)) 686 return (pp); 687 } 688 } 689 } 690 return (NULL); 691} 692 693void 694g_destroy_provider(struct g_provider *pp) 695{ 696 struct g_geom *gp; 697 698 g_topology_assert(); 699 G_VALID_PROVIDER(pp); 700 KASSERT(LIST_EMPTY(&pp->consumers), 701 ("g_destroy_provider but attached")); 702 KASSERT (pp->acr == 0, ("g_destroy_provider with acr")); 703 KASSERT (pp->acw == 0, ("g_destroy_provider with acw")); 704 KASSERT (pp->ace == 0, ("g_destroy_provider with ace")); 705 g_cancel_event(pp); 706 LIST_REMOVE(pp, provider); 707 gp = pp->geom; 708 devstat_remove_entry(pp->stat); 709 /* 710 * If a callback was provided, send notification that the provider 711 * is now gone. 
712 */ 713 if (gp->providergone != NULL) 714 gp->providergone(pp); 715 716 g_free(pp); 717 if ((gp->flags & G_GEOM_WITHER)) 718 g_do_wither(); 719} 720 721/* 722 * We keep the "geoms" list sorted by topological order (== increasing 723 * numerical rank) at all times. 724 * When an attach is done, the attaching geoms rank is invalidated 725 * and it is moved to the tail of the list. 726 * All geoms later in the sequence has their ranks reevaluated in 727 * sequence. If we cannot assign rank to a geom because it's 728 * prerequisites do not have rank, we move that element to the tail 729 * of the sequence with invalid rank as well. 730 * At some point we encounter our original geom and if we stil fail 731 * to assign it a rank, there must be a loop and we fail back to 732 * g_attach() which detach again and calls redo_rank again 733 * to fix up the damage. 734 * It would be much simpler code wise to do it recursively, but we 735 * can't risk that on the kernel stack. 736 */ 737 738static int 739redo_rank(struct g_geom *gp) 740{ 741 struct g_consumer *cp; 742 struct g_geom *gp1, *gp2; 743 int n, m; 744 745 g_topology_assert(); 746 G_VALID_GEOM(gp); 747 748 /* Invalidate this geoms rank and move it to the tail */ 749 gp1 = TAILQ_NEXT(gp, geoms); 750 if (gp1 != NULL) { 751 gp->rank = 0; 752 TAILQ_REMOVE(&geoms, gp, geoms); 753 TAILQ_INSERT_TAIL(&geoms, gp, geoms); 754 } else { 755 gp1 = gp; 756 } 757 758 /* re-rank the rest of the sequence */ 759 for (; gp1 != NULL; gp1 = gp2) { 760 gp1->rank = 0; 761 m = 1; 762 LIST_FOREACH(cp, &gp1->consumer, consumer) { 763 if (cp->provider == NULL) 764 continue; 765 n = cp->provider->geom->rank; 766 if (n == 0) { 767 m = 0; 768 break; 769 } else if (n >= m) 770 m = n + 1; 771 } 772 gp1->rank = m; 773 gp2 = TAILQ_NEXT(gp1, geoms); 774 775 /* got a rank, moving on */ 776 if (m != 0) 777 continue; 778 779 /* no rank to original geom means loop */ 780 if (gp == gp1) 781 return (ELOOP); 782 783 /* no rank, put it at the end move on */ 
784 TAILQ_REMOVE(&geoms, gp1, geoms); 785 TAILQ_INSERT_TAIL(&geoms, gp1, geoms); 786 } 787 return (0); 788} 789 790int 791g_attach(struct g_consumer *cp, struct g_provider *pp) 792{ 793 int error; 794 795 g_topology_assert(); 796 G_VALID_CONSUMER(cp); 797 G_VALID_PROVIDER(pp); 798 g_trace(G_T_TOPOLOGY, "g_attach(%p, %p)", cp, pp); 799 KASSERT(cp->provider == NULL, ("attach but attached")); 800 cp->provider = pp; 801 LIST_INSERT_HEAD(&pp->consumers, cp, consumers); 802 error = redo_rank(cp->geom); 803 if (error) { 804 LIST_REMOVE(cp, consumers); 805 cp->provider = NULL; 806 redo_rank(cp->geom); 807 } 808 return (error); 809} 810 811void 812g_detach(struct g_consumer *cp) 813{ 814 struct g_provider *pp; 815 816 g_topology_assert(); 817 G_VALID_CONSUMER(cp); 818 g_trace(G_T_TOPOLOGY, "g_detach(%p)", cp); 819 KASSERT(cp->provider != NULL, ("detach but not attached")); 820 KASSERT(cp->acr == 0, ("detach but nonzero acr")); 821 KASSERT(cp->acw == 0, ("detach but nonzero acw")); 822 KASSERT(cp->ace == 0, ("detach but nonzero ace")); 823 KASSERT(cp->nstart == cp->nend, 824 ("detach with active requests")); 825 pp = cp->provider; 826 LIST_REMOVE(cp, consumers); 827 cp->provider = NULL; 828 if (pp->geom->flags & G_GEOM_WITHER) 829 g_do_wither(); 830 else if (pp->flags & G_PF_WITHER) 831 g_do_wither(); 832 redo_rank(cp->geom); 833} 834 835/* 836 * g_access() 837 * 838 * Access-check with delta values. The question asked is "can provider 839 * "cp" change the access counters by the relative amounts dc[rwe] ?" 
 */
int
g_access(struct g_consumer *cp, int dcr, int dcw, int dce)
{
        struct g_provider *pp;
        int pr,pw,pe;
        int error;

        g_topology_assert();
        G_VALID_CONSUMER(cp);
        pp = cp->provider;
        KASSERT(pp != NULL, ("access but not attached"));
        G_VALID_PROVIDER(pp);

        g_trace(G_T_ACCESS, "g_access(%p(%s), %d, %d, %d)",
            cp, pp->name, dcr, dcw, dce);

        KASSERT(cp->acr + dcr >= 0, ("access resulting in negative acr"));
        KASSERT(cp->acw + dcw >= 0, ("access resulting in negative acw"));
        KASSERT(cp->ace + dce >= 0, ("access resulting in negative ace"));
        KASSERT(dcr != 0 || dcw != 0 || dce != 0, ("NOP access request"));
        KASSERT(pp->geom->access != NULL, ("NULL geom->access"));

        /*
         * If our class cares about being spoiled, and we have been, we
         * are probably just ahead of the event telling us that.  Fail
         * now rather than having to unravel this later.
         */
        if (cp->geom->spoiled != NULL && cp->spoiled &&
            (dcr > 0 || dcw > 0 || dce > 0))
                return (ENXIO);

        /*
         * Figure out what counts the provider would have had, if this
         * consumer had (r0w0e0) at this time.
         */
        pr = pp->acr - cp->acr;
        pw = pp->acw - cp->acw;
        pe = pp->ace - cp->ace;

        g_trace(G_T_ACCESS,
    "open delta:[r%dw%de%d] old:[r%dw%de%d] provider:[r%dw%de%d] %p(%s)",
            dcr, dcw, dce,
            cp->acr, cp->acw, cp->ace,
            pp->acr, pp->acw, pp->ace,
            pp, pp->name);

        /* If foot-shooting is enabled, any open on rank#1 is OK */
        /* (16 is the "foot-shooting" bit in g_debugflags.) */
        if ((g_debugflags & 16) && pp->geom->rank == 1)
                ;
        /* If we try exclusive but already write: fail */
        else if (dce > 0 && pw > 0)
                return (EPERM);
        /* If we try write but already exclusive: fail */
        else if (dcw > 0 && pe > 0)
                return (EPERM);
        /* If we try to open more but provider is error'ed: fail */
        else if ((dcr > 0 || dcw > 0 || dce > 0) && pp->error != 0)
                return (pp->error);

        /* Ok then... */

        /* Let the geom have its say; closes must not fail (asserted). */
        error = pp->geom->access(pp, dcr, dcw, dce);
        KASSERT(dcr > 0 || dcw > 0 || dce > 0 || error == 0,
            ("Geom provider %s::%s failed closing ->access()",
            pp->geom->class->name, pp->name));
        if (!error) {
                /*
                 * If we open first write, spoil any partner consumers.
                 * If we close last write and provider is not errored,
                 * trigger re-taste.
                 */
                if (pp->acw == 0 && dcw != 0)
                        g_spoil(pp, cp);
                else if (pp->acw != 0 && pp->acw == -dcw && pp->error == 0 &&
                    !(pp->geom->flags & G_GEOM_WITHER))
                        g_post_event(g_new_provider_event, pp, M_WAITOK,
                            pp, NULL);

                pp->acr += dcr;
                pp->acw += dcw;
                pp->ace += dce;
                cp->acr += dcr;
                cp->acw += dcw;
                cp->ace += dce;
                if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)
                        KASSERT(pp->sectorsize > 0,
                            ("Provider %s lacks sectorsize", pp->name));
        }
        return (error);
}

/* Answer a BIO_GETATTR request with an int value. */
int
g_handleattr_int(struct bio *bp, const char *attribute, int val)
{

        return (g_handleattr(bp, attribute, &val, sizeof val));
}

/* Answer a BIO_GETATTR request with an off_t value. */
int
g_handleattr_off_t(struct bio *bp, const char *attribute, off_t val)
{

        return (g_handleattr(bp, attribute, &val, sizeof val));
}

/* Answer a BIO_GETATTR request with a NUL-terminated string (len == 0). */
int
g_handleattr_str(struct bio *bp, const char *attribute, const char *str)
{

        return (g_handleattr(bp, attribute, str, 0));
}

/*
 * Answer a BIO_GETATTR request if it asks for "attribute".  Returns 0 when
 * the request is for a different attribute (caller should keep looking);
 * returns 1 when the request was consumed and delivered, whether or not
 * the copy fit (EFAULT on size mismatch/truncation).  len == 0 means "val
 * is a string": copy with NUL-termination into the request buffer.
 */
int
g_handleattr(struct bio *bp, const char *attribute, const void *val, int len)
{
        int error = 0;

        if (strcmp(bp->bio_attribute, attribute))
                return (0);
        if (len == 0) {
                bzero(bp->bio_data, bp->bio_length);
                if (strlcpy(bp->bio_data, val, bp->bio_length) >=
                    bp->bio_length) {
                        printf("%s: %s bio_length %jd len %zu -> EFAULT\n",
                            __func__, bp->bio_to->name,
                            (intmax_t)bp->bio_length, strlen(val));
                        error = EFAULT;
                }
        } else if (bp->bio_length == len) {
                bcopy(val, bp->bio_data, len);
        } else {
                printf("%s: %s bio_length %jd len %d -> EFAULT\n", __func__,
                    bp->bio_to->name, (intmax_t)bp->bio_length, len);
                error = EFAULT;
        }
        if (error == 0)
                bp->bio_completed = bp->bio_length;
        g_io_deliver(bp, error);
        return (1);
}

/* Default ->access method: permit everything. */
int
g_std_access(struct g_provider *pp,
        int dr __unused, int dw __unused, int de __unused)
{

        g_topology_assert();
        G_VALID_PROVIDER(pp);
        return (0);
}

/*
 * Default bio completion routine for cloned requests: propagate the first
 * error and accumulated byte count to the parent, and deliver the parent
 * once all of its children have come home.
 */
void
g_std_done(struct bio *bp)
{
        struct bio *bp2;

        bp2 = bp->bio_parent;
        if (bp2->bio_error == 0)
                bp2->bio_error = bp->bio_error;
        bp2->bio_completed += bp->bio_completed;
        g_destroy_bio(bp);
        bp2->bio_inbed++;
        if (bp2->bio_children == bp2->bio_inbed)
                g_io_deliver(bp2, bp2->bio_error);
}

/* XXX: maybe this is only g_slice_spoiled */

/*
 * Default ->spoiled method: detach and destroy the consumer, orphan the
 * geom's providers, and destroy (or mark withering) the geom itself.
 */
void
g_std_spoiled(struct g_consumer *cp)
{
        struct g_geom *gp;
        struct g_provider *pp;

        g_topology_assert();
        G_VALID_CONSUMER(cp);
        g_trace(G_T_TOPOLOGY, "g_std_spoiled(%p)", cp);
        g_detach(cp);
        gp = cp->geom;
        LIST_FOREACH(pp, &gp->provider, provider)
                g_orphan_provider(pp, ENXIO);
        g_destroy_consumer(cp);
        if (LIST_EMPTY(&gp->provider) && LIST_EMPTY(&gp->consumer))
                g_destroy_geom(gp);
        else
                gp->flags |= G_GEOM_WITHER;
}

/*
 * Spoiling happens when a provider is opened for writing, but consumers
 * which are configured by in-band data are attached (slicers for instance).
 * Since the write might potentially change the in-band data, such consumers
 * need to re-evaluate their existence after the writing session closes.
 * We do this by (offering to) tear them down when the open for write happens
 * in return for a re-taste when it closes again.
 * Together with the fact that such consumers grab an 'e' bit whenever they
 * are open, regardless of mode, this ends up DTRT.
 */
static void
g_spoil_event(void *arg, int flag)
{
        struct g_provider *pp;
        struct g_consumer *cp, *cp2;

        g_topology_assert();
        if (flag == EV_CANCEL)
                return;
        pp = arg;
        G_VALID_PROVIDER(pp);
        /* Manual safe iteration: ->spoiled may remove cp from the list. */
        for (cp = LIST_FIRST(&pp->consumers); cp != NULL; cp = cp2) {
                cp2 = LIST_NEXT(cp, consumers);
                if (!cp->spoiled)
                        continue;
                cp->spoiled = 0;
                if (cp->geom->spoiled == NULL)
                        continue;
                cp->geom->spoiled(cp);
                g_topology_assert();
        }
}

/*
 * Mark every consumer of pp other than cp as spoiled and post the event
 * that lets their geoms react (see the comment above g_spoil_event).
 */
void
g_spoil(struct g_provider *pp, struct g_consumer *cp)
{
        struct g_consumer *cp2;

        g_topology_assert();
        G_VALID_PROVIDER(pp);
        G_VALID_CONSUMER(cp);

        LIST_FOREACH(cp2, &pp->consumers, consumers) {
                if (cp2 == cp)
                        continue;
/*
                KASSERT(cp2->acr == 0, ("spoiling cp->acr = %d", cp2->acr));
                KASSERT(cp2->acw == 0, ("spoiling cp->acw = %d", cp2->acw));
*/
                KASSERT(cp2->ace == 0, ("spoiling cp->ace = %d", cp2->ace));
                cp2->spoiled++;
        }
        g_post_event(g_spoil_event, pp, M_WAITOK, pp, NULL);
}

/*
 * Fetch attribute "attr" through consumer cp into var, expecting exactly
 * len bytes; EINVAL if the provider returned a different length.
 */
int
g_getattr__(const char *attr, struct g_consumer *cp, void *var, int len)
{
        int error, i;

        i = len;
        error = g_io_getattr(attr, cp, &i, var);
        if (error)
                return (error);
        if (i != len)
                return (EINVAL);
        return (0);
}

/*
 * Return the length of the "ada<unit>" or "ad<unit>" prefix of a device
 * name, or 0 if the name does not start with such a prefix.
 */
static int
g_get_device_prefix_len(const char *name)
{
        int len;

        if (strncmp(name, "ada", 3) == 0)
                len = 3;
        else if (strncmp(name, "ad", 2) == 0)
                len = 2;
        else
                return (0);
        /* The prefix must be followed by at least one digit. */
        if (name[len] < '0' || name[len] > '9')
                return (0);
        do {
                len++;
        } while (name[len] >= '0' && name[len] <= '9');
        return (len);
}

/*
 * Compare two device names, treating "ad<unit>" and "ada<unit>" prefixes
 * as interchangeable when the remainders match.  Returns 1 on match.
 */
int
g_compare_names(const char *namea, const char *nameb)
{
        int deva, devb;

        if (strcmp(namea, nameb) == 0)
                return (1);
        deva = g_get_device_prefix_len(namea);
        if (deva == 0)
                return (0);
        devb = g_get_device_prefix_len(nameb);
        if (devb == 0)
                return (0);
        if (strcmp(namea + deva, nameb + devb) == 0)
                return (1);
        return (0);
}

#if defined(DIAGNOSTIC) || defined(DDB)
/*
 * This function walks the mesh and returns a non-zero integer if it
 * finds the argument pointer is an object.  The return value indicates
 * which type of object it is believed to be.  If topology is not locked,
 * this function is potentially dangerous, but we don't assert that the
 * topology lock is held when called from debugger.
 */
int
g_valid_obj(void const *ptr)
{
        struct g_class *mp;
        struct g_geom *gp;
        struct g_consumer *cp;
        struct g_provider *pp;

#ifdef KDB
        if (kdb_active == 0)
#endif
                g_topology_assert();

        LIST_FOREACH(mp, &g_classes, class) {
                if (ptr == mp)
                        return (1);     /* class */
                LIST_FOREACH(gp, &mp->geom, geom) {
                        if (ptr == gp)
                                return (2);     /* geom */
                        LIST_FOREACH(cp, &gp->consumer, consumer)
                                if (ptr == cp)
                                        return (3);     /* consumer */
                        LIST_FOREACH(pp, &gp->provider, provider)
                                if (ptr == pp)
                                        return (4);     /* provider */
                }
        }
        return (0);
}
#endif

#ifdef DDB

/* Indented db_printf helpers for the DDB show commands below. */
#define gprintf(...)    do {                                            \
        db_printf("%*s", indent, "");                                   \
        db_printf(__VA_ARGS__);                                         \
} while (0)
#define gprintln(...)   do {                                            \
        gprintf(__VA_ARGS__);                                           \
        db_printf("\n");                                                \
} while (0)

/* Append sflag (comma-separated) to str when obj has flag set. */
#define ADDFLAG(obj, flag, sflag)       do {                            \
        if ((obj)->flags & (flag)) {                                    \
                if (comma)                                              \
                        strlcat(str, ",", size);                        \
                strlcat(str, (sflag), size);                            \
                comma = 1;                                              \
        }                                                               \
} while (0)

/* Render a provider's flag bits as a comma-separated list into str. */
static char *
provider_flags_to_string(struct g_provider *pp, char *str, size_t size)
{
        int comma = 0;

        bzero(str, size);
        if (pp->flags == 0) {
                strlcpy(str, "NONE", size);
                return (str);
        }
        ADDFLAG(pp, G_PF_CANDELETE, "G_PF_CANDELETE");
        ADDFLAG(pp, G_PF_WITHER, "G_PF_WITHER");
        ADDFLAG(pp, G_PF_ORPHAN, "G_PF_ORPHAN");
        return (str);
}

/* Render a geom's flag bits as a comma-separated list into str. */
static char *
geom_flags_to_string(struct g_geom *gp, char *str, size_t size)
{
        int comma = 0;

        bzero(str, size);
        if (gp->flags == 0) {
                strlcpy(str, "NONE", size);
                return (str);
        }
        ADDFLAG(gp, G_GEOM_WITHER, "G_GEOM_WITHER");
        return (str);
}

/* DDB: dump one consumer; indent == 0 gives the verbose multi-line form. */
static void
db_show_geom_consumer(int indent, struct g_consumer *cp)
{

        if (indent == 0) {
                gprintln("consumer: %p", cp);
                gprintln("  class:    %s (%p)", cp->geom->class->name,
                    cp->geom->class);
                gprintln("  geom:     %s (%p)", cp->geom->name, cp->geom);
                if (cp->provider == NULL)
                        gprintln("  provider: none");
                else {
                        gprintln("  provider: %s (%p)", cp->provider->name,
                            cp->provider);
                }
                gprintln("  access:   r%dw%de%d", cp->acr, cp->acw, cp->ace);
                gprintln("  spoiled:  %d", cp->spoiled);
                gprintln("  nstart:   %u", cp->nstart);
                gprintln("  nend:     %u", cp->nend);
        } else {
                gprintf("consumer: %p (%s), access=r%dw%de%d", cp,
                    cp->provider != NULL ?
cp->provider->name : "none", 1247 cp->acr, cp->acw, cp->ace); 1248 if (cp->spoiled) 1249 db_printf(", spoiled=%d", cp->spoiled); 1250 db_printf("\n"); 1251 } 1252} 1253 1254static void 1255db_show_geom_provider(int indent, struct g_provider *pp) 1256{ 1257 struct g_consumer *cp; 1258 char flags[64]; 1259 1260 if (indent == 0) { 1261 gprintln("provider: %s (%p)", pp->name, pp); 1262 gprintln(" class: %s (%p)", pp->geom->class->name, 1263 pp->geom->class); 1264 gprintln(" geom: %s (%p)", pp->geom->name, pp->geom); 1265 gprintln(" mediasize: %jd", (intmax_t)pp->mediasize); 1266 gprintln(" sectorsize: %u", pp->sectorsize); 1267 gprintln(" stripesize: %u", pp->stripesize); 1268 gprintln(" stripeoffset: %u", pp->stripeoffset); 1269 gprintln(" access: r%dw%de%d", pp->acr, pp->acw, 1270 pp->ace); 1271 gprintln(" flags: %s (0x%04x)", 1272 provider_flags_to_string(pp, flags, sizeof(flags)), 1273 pp->flags); 1274 gprintln(" error: %d", pp->error); 1275 gprintln(" nstart: %u", pp->nstart); 1276 gprintln(" nend: %u", pp->nend); 1277 if (LIST_EMPTY(&pp->consumers)) 1278 gprintln(" consumers: none"); 1279 } else { 1280 gprintf("provider: %s (%p), access=r%dw%de%d", 1281 pp->name, pp, pp->acr, pp->acw, pp->ace); 1282 if (pp->flags != 0) { 1283 db_printf(", flags=%s (0x%04x)", 1284 provider_flags_to_string(pp, flags, sizeof(flags)), 1285 pp->flags); 1286 } 1287 db_printf("\n"); 1288 } 1289 if (!LIST_EMPTY(&pp->consumers)) { 1290 LIST_FOREACH(cp, &pp->consumers, consumers) { 1291 db_show_geom_consumer(indent + 2, cp); 1292 if (db_pager_quit) 1293 break; 1294 } 1295 } 1296} 1297 1298static void 1299db_show_geom_geom(int indent, struct g_geom *gp) 1300{ 1301 struct g_provider *pp; 1302 struct g_consumer *cp; 1303 char flags[64]; 1304 1305 if (indent == 0) { 1306 gprintln("geom: %s (%p)", gp->name, gp); 1307 gprintln(" class: %s (%p)", gp->class->name, gp->class); 1308 gprintln(" flags: %s (0x%04x)", 1309 geom_flags_to_string(gp, flags, sizeof(flags)), gp->flags); 1310 gprintln(" rank: 
%d", gp->rank); 1311 if (LIST_EMPTY(&gp->provider)) 1312 gprintln(" providers: none"); 1313 if (LIST_EMPTY(&gp->consumer)) 1314 gprintln(" consumers: none"); 1315 } else { 1316 gprintf("geom: %s (%p), rank=%d", gp->name, gp, gp->rank); 1317 if (gp->flags != 0) { 1318 db_printf(", flags=%s (0x%04x)", 1319 geom_flags_to_string(gp, flags, sizeof(flags)), 1320 gp->flags); 1321 } 1322 db_printf("\n"); 1323 } 1324 if (!LIST_EMPTY(&gp->provider)) { 1325 LIST_FOREACH(pp, &gp->provider, provider) { 1326 db_show_geom_provider(indent + 2, pp); 1327 if (db_pager_quit) 1328 break; 1329 } 1330 } 1331 if (!LIST_EMPTY(&gp->consumer)) { 1332 LIST_FOREACH(cp, &gp->consumer, consumer) { 1333 db_show_geom_consumer(indent + 2, cp); 1334 if (db_pager_quit) 1335 break; 1336 } 1337 } 1338} 1339 1340static void 1341db_show_geom_class(struct g_class *mp) 1342{ 1343 struct g_geom *gp; 1344 1345 db_printf("class: %s (%p)\n", mp->name, mp); 1346 LIST_FOREACH(gp, &mp->geom, geom) { 1347 db_show_geom_geom(2, gp); 1348 if (db_pager_quit) 1349 break; 1350 } 1351} 1352 1353/* 1354 * Print the GEOM topology or the given object. 1355 */ 1356DB_SHOW_COMMAND(geom, db_show_geom) 1357{ 1358 struct g_class *mp; 1359 1360 if (!have_addr) { 1361 /* No address given, print the entire topology. 
*/ 1362 LIST_FOREACH(mp, &g_classes, class) { 1363 db_show_geom_class(mp); 1364 db_printf("\n"); 1365 if (db_pager_quit) 1366 break; 1367 } 1368 } else { 1369 switch (g_valid_obj((void *)addr)) { 1370 case 1: 1371 db_show_geom_class((struct g_class *)addr); 1372 break; 1373 case 2: 1374 db_show_geom_geom(0, (struct g_geom *)addr); 1375 break; 1376 case 3: 1377 db_show_geom_consumer(0, (struct g_consumer *)addr); 1378 break; 1379 case 4: 1380 db_show_geom_provider(0, (struct g_provider *)addr); 1381 break; 1382 default: 1383 db_printf("Not a GEOM object.\n"); 1384 break; 1385 } 1386 } 1387} 1388 1389static void 1390db_print_bio_cmd(struct bio *bp) 1391{ 1392 db_printf(" cmd: "); 1393 switch (bp->bio_cmd) { 1394 case BIO_READ: db_printf("BIO_READ"); break; 1395 case BIO_WRITE: db_printf("BIO_WRITE"); break; 1396 case BIO_DELETE: db_printf("BIO_DELETE"); break; 1397 case BIO_GETATTR: db_printf("BIO_GETATTR"); break; 1398 case BIO_FLUSH: db_printf("BIO_FLUSH"); break; 1399 case BIO_CMD0: db_printf("BIO_CMD0"); break; 1400 case BIO_CMD1: db_printf("BIO_CMD1"); break; 1401 case BIO_CMD2: db_printf("BIO_CMD2"); break; 1402 default: db_printf("UNKNOWN"); break; 1403 } 1404 db_printf("\n"); 1405} 1406 1407static void 1408db_print_bio_flags(struct bio *bp) 1409{ 1410 int comma; 1411 1412 comma = 0; 1413 db_printf(" flags: "); 1414 if (bp->bio_flags & BIO_ERROR) { 1415 db_printf("BIO_ERROR"); 1416 comma = 1; 1417 } 1418 if (bp->bio_flags & BIO_DONE) { 1419 db_printf("%sBIO_DONE", (comma ? ", " : "")); 1420 comma = 1; 1421 } 1422 if (bp->bio_flags & BIO_ONQUEUE) 1423 db_printf("%sBIO_ONQUEUE", (comma ? 
", " : "")); 1424 db_printf("\n"); 1425} 1426 1427/* 1428 * Print useful information in a BIO 1429 */ 1430DB_SHOW_COMMAND(bio, db_show_bio) 1431{ 1432 struct bio *bp; 1433 1434 if (have_addr) { 1435 bp = (struct bio *)addr; 1436 db_printf("BIO %p\n", bp); 1437 db_print_bio_cmd(bp); 1438 db_print_bio_flags(bp); 1439 db_printf(" cflags: 0x%hhx\n", bp->bio_cflags); 1440 db_printf(" pflags: 0x%hhx\n", bp->bio_pflags); 1441 db_printf(" offset: %jd\n", (intmax_t)bp->bio_offset); 1442 db_printf(" length: %jd\n", (intmax_t)bp->bio_length); 1443 db_printf(" bcount: %ld\n", bp->bio_bcount); 1444 db_printf(" resid: %ld\n", bp->bio_resid); 1445 db_printf(" completed: %jd\n", (intmax_t)bp->bio_completed); 1446 db_printf(" children: %u\n", bp->bio_children); 1447 db_printf(" inbed: %u\n", bp->bio_inbed); 1448 db_printf(" error: %d\n", bp->bio_error); 1449 db_printf(" parent: %p\n", bp->bio_parent); 1450 db_printf(" driver1: %p\n", bp->bio_driver1); 1451 db_printf(" driver2: %p\n", bp->bio_driver2); 1452 db_printf(" caller1: %p\n", bp->bio_caller1); 1453 db_printf(" caller2: %p\n", bp->bio_caller2); 1454 db_printf(" bio_from: %p\n", bp->bio_from); 1455 db_printf(" bio_to: %p\n", bp->bio_to); 1456 } 1457} 1458 1459#undef gprintf 1460#undef gprintln 1461#undef ADDFLAG 1462 1463#endif /* DDB */ 1464