/* geom_io.c — FreeBSD revision 169283 */
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 2797883Sgibbs * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 2897883Sgibbs * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2997883Sgibbs * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 3097883Sgibbs * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 3197883Sgibbs * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 3297883Sgibbs * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33123579Sgibbs * SUCH DAMAGE. 3497883Sgibbs */ 3597883Sgibbs 3697883Sgibbs#include <sys/cdefs.h> 3797883Sgibbs__FBSDID("$FreeBSD: head/sys/geom/geom_io.c 169283 2007-05-05 16:35:22Z pjd $"); 3897883Sgibbs 3997883Sgibbs#include <sys/param.h> 4097883Sgibbs#include <sys/systm.h> 4197883Sgibbs#include <sys/kernel.h> 4297883Sgibbs#include <sys/malloc.h> 4397883Sgibbs#include <sys/bio.h> 4497883Sgibbs#include <sys/ktr.h> 4597883Sgibbs#include <sys/proc.h> 46102685Sgibbs#include <sys/stack.h> 47102685Sgibbs 48102685Sgibbs#include <sys/errno.h> 4997883Sgibbs#include <geom/geom.h> 5097883Sgibbs#include <geom/geom_int.h> 5197883Sgibbs#include <sys/devicestat.h> 52129879Sphk 5397883Sgibbs#include <vm/uma.h> 54199260Sattilio 5597883Sgibbsstatic struct g_bioq g_bio_run_down; 56123579Sgibbsstatic struct g_bioq g_bio_run_up; 5797883Sgibbsstatic struct g_bioq g_bio_run_task; 5897883Sgibbs 5997883Sgibbsstatic u_int pace; 6097883Sgibbsstatic uma_zone_t biozone; 6197883Sgibbs 6297883Sgibbs#include <machine/atomic.h> 63119277Simp 64119277Simpstatic void 65119277Simpg_bioq_lock(struct g_bioq *bq) 66119277Simp{ 6797883Sgibbs 6897883Sgibbs mtx_lock(&bq->bio_queue_lock); 69119277Simp} 7097883Sgibbs 7197883Sgibbsstatic void 7297883Sgibbsg_bioq_unlock(struct g_bioq *bq) 7397883Sgibbs{ 7497883Sgibbs 7597883Sgibbs mtx_unlock(&bq->bio_queue_lock); 7697883Sgibbs} 7797883Sgibbs 7897883Sgibbs#if 0 7997883Sgibbsstatic 
void 8097883Sgibbsg_bioq_destroy(struct g_bioq *bq) 8197883Sgibbs{ 8297883Sgibbs 8397883Sgibbs mtx_destroy(&bq->bio_queue_lock); 8497883Sgibbs} 8597883Sgibbs#endif 8697883Sgibbs 8797883Sgibbsstatic void 8897883Sgibbsg_bioq_init(struct g_bioq *bq) 8997883Sgibbs{ 9097883Sgibbs 9197883Sgibbs TAILQ_INIT(&bq->bio_queue); 9297883Sgibbs mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF); 9397883Sgibbs} 9497883Sgibbs 9597883Sgibbsstatic struct bio * 9697883Sgibbsg_bioq_first(struct g_bioq *bq) 9797883Sgibbs{ 9897883Sgibbs struct bio *bp; 9997883Sgibbs 10097883Sgibbs bp = TAILQ_FIRST(&bq->bio_queue); 10197883Sgibbs if (bp != NULL) { 10297883Sgibbs KASSERT((bp->bio_flags & BIO_ONQUEUE), 10397883Sgibbs ("Bio not on queue bp=%p target %p", bp, bq)); 10497883Sgibbs bp->bio_flags &= ~BIO_ONQUEUE; 10597883Sgibbs TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue); 106195534Sscottl bq->bio_queue_length--; 107195534Sscottl } 10897883Sgibbs return (bp); 10997883Sgibbs} 11097883Sgibbs 11197883Sgibbsstruct bio * 11297883Sgibbsg_new_bio(void) 11397883Sgibbs{ 11497883Sgibbs struct bio *bp; 115195534Sscottl 116195534Sscottl bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO); 11797883Sgibbs#ifdef KTR 11897883Sgibbs if (KTR_COMPILE & KTR_GEOM) { 119153072Sru struct stack st; 12097883Sgibbs 12197883Sgibbs CTR1(KTR_GEOM, "g_new_bio(): %p", bp); 12297883Sgibbs stack_save(&st); 12397883Sgibbs CTRSTACK(KTR_GEOM, &st, 3, 0); 12497883Sgibbs } 12597883Sgibbs#endif 12697883Sgibbs return (bp); 12797883Sgibbs} 12897883Sgibbs 12997883Sgibbsstruct bio * 13097883Sgibbsg_alloc_bio(void) 13197883Sgibbs{ 13297883Sgibbs struct bio *bp; 13397883Sgibbs 13497883Sgibbs bp = uma_zalloc(biozone, M_WAITOK | M_ZERO); 13597883Sgibbs#ifdef KTR 13697883Sgibbs if (KTR_COMPILE & KTR_GEOM) { 13797883Sgibbs struct stack st; 138123579Sgibbs 139168807Sscottl CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp); 14097883Sgibbs stack_save(&st); 14197883Sgibbs CTRSTACK(KTR_GEOM, &st, 3, 0); 14297883Sgibbs } 14397883Sgibbs#endif 14497883Sgibbs 
return (bp); 14597883Sgibbs} 146153165Sru 147102685Sgibbsvoid 148102685Sgibbsg_destroy_bio(struct bio *bp) 149102685Sgibbs{ 150102685Sgibbs#ifdef KTR 151123579Sgibbs if (KTR_COMPILE & KTR_GEOM) { 152123579Sgibbs struct stack st; 153123579Sgibbs 154123579Sgibbs CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp); 15597883Sgibbs stack_save(&st); 15697883Sgibbs CTRSTACK(KTR_GEOM, &st, 3, 0); 15797883Sgibbs } 15897883Sgibbs#endif 15997883Sgibbs uma_zfree(biozone, bp); 16097883Sgibbs} 16197883Sgibbs 16297883Sgibbsstruct bio * 16397883Sgibbsg_clone_bio(struct bio *bp) 16497883Sgibbs{ 16597883Sgibbs struct bio *bp2; 166123579Sgibbs 16797883Sgibbs bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO); 16897883Sgibbs if (bp2 != NULL) { 16997883Sgibbs bp2->bio_parent = bp; 17097883Sgibbs bp2->bio_cmd = bp->bio_cmd; 17197883Sgibbs bp2->bio_length = bp->bio_length; 172123579Sgibbs bp2->bio_offset = bp->bio_offset; 17397883Sgibbs bp2->bio_data = bp->bio_data; 17497883Sgibbs bp2->bio_attribute = bp->bio_attribute; 17597883Sgibbs bp->bio_children++; 17697883Sgibbs } 17797883Sgibbs#ifdef KTR 17897883Sgibbs if (KTR_COMPILE & KTR_GEOM) { 17997883Sgibbs struct stack st; 18097883Sgibbs 18197883Sgibbs CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2); 18297883Sgibbs stack_save(&st); 18397883Sgibbs CTRSTACK(KTR_GEOM, &st, 3, 0); 18497883Sgibbs } 18597883Sgibbs#endif 18697883Sgibbs return(bp2); 18797883Sgibbs} 18897883Sgibbs 18997883Sgibbsstruct bio * 19097883Sgibbsg_duplicate_bio(struct bio *bp) 19197883Sgibbs{ 19297883Sgibbs struct bio *bp2; 19397883Sgibbs 19497883Sgibbs bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO); 19597883Sgibbs bp2->bio_parent = bp; 196168807Sscottl bp2->bio_cmd = bp->bio_cmd; 197168807Sscottl bp2->bio_length = bp->bio_length; 19897883Sgibbs bp2->bio_offset = bp->bio_offset; 19997883Sgibbs bp2->bio_data = bp->bio_data; 20097883Sgibbs bp2->bio_attribute = bp->bio_attribute; 20197883Sgibbs bp->bio_children++; 202168807Sscottl#ifdef KTR 20397883Sgibbs if (KTR_COMPILE & KTR_GEOM) { 
20497883Sgibbs struct stack st; 20597883Sgibbs 206168807Sscottl CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2); 20797883Sgibbs stack_save(&st); 208168807Sscottl CTRSTACK(KTR_GEOM, &st, 3, 0); 20997883Sgibbs } 21097883Sgibbs#endif 21197883Sgibbs return(bp2); 212168807Sscottl} 21397883Sgibbs 214168807Sscottlvoid 21597883Sgibbsg_io_init() 21697883Sgibbs{ 217123579Sgibbs 218123579Sgibbs g_bioq_init(&g_bio_run_down); 219123579Sgibbs g_bioq_init(&g_bio_run_up); 220123579Sgibbs g_bioq_init(&g_bio_run_task); 22197883Sgibbs biozone = uma_zcreate("g_bio", sizeof (struct bio), 222123579Sgibbs NULL, NULL, 223123579Sgibbs NULL, NULL, 22497883Sgibbs 0, 0); 22597883Sgibbs} 226123579Sgibbs 22797883Sgibbsint 228123579Sgibbsg_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr) 22997883Sgibbs{ 23097883Sgibbs struct bio *bp; 23197883Sgibbs int error; 232123579Sgibbs 23397883Sgibbs g_trace(G_T_BIO, "bio_getattr(%s)", attr); 234123579Sgibbs bp = g_alloc_bio(); 23597883Sgibbs bp->bio_cmd = BIO_GETATTR; 23697883Sgibbs bp->bio_done = NULL; 23797883Sgibbs bp->bio_attribute = attr; 23897883Sgibbs bp->bio_length = *len; 23997883Sgibbs bp->bio_data = ptr; 24097883Sgibbs g_io_request(bp, cp); 24197883Sgibbs error = biowait(bp, "ggetattr"); 24297883Sgibbs *len = bp->bio_completed; 24397883Sgibbs g_destroy_bio(bp); 24497883Sgibbs return (error); 24597883Sgibbs} 24697883Sgibbs 24797883Sgibbsint 24897883Sgibbsg_io_flush(struct g_consumer *cp) 24997883Sgibbs{ 25097883Sgibbs struct bio *bp; 25197883Sgibbs int error; 25297883Sgibbs 25397883Sgibbs g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name); 25497883Sgibbs bp = g_alloc_bio(); 25597883Sgibbs bp->bio_cmd = BIO_FLUSH; 25697883Sgibbs bp->bio_done = NULL; 25797883Sgibbs bp->bio_attribute = NULL; 25897883Sgibbs bp->bio_offset = cp->provider->mediasize; 25997883Sgibbs bp->bio_length = 0; 26097883Sgibbs bp->bio_data = NULL; 26197883Sgibbs g_io_request(bp, cp); 26297883Sgibbs error = biowait(bp, "gflush"); 263199260Sattilio 
g_destroy_bio(bp); 26497883Sgibbs return (error); 265107437Sscottl} 26697883Sgibbs 267107437Sscottlstatic int 26897883Sgibbsg_io_check(struct bio *bp) 26997883Sgibbs{ 27097883Sgibbs struct g_consumer *cp; 27197883Sgibbs struct g_provider *pp; 27297883Sgibbs 27397883Sgibbs cp = bp->bio_from; 27497883Sgibbs pp = bp->bio_to; 27597883Sgibbs 27697883Sgibbs /* Fail if access counters dont allow the operation */ 27797883Sgibbs switch(bp->bio_cmd) { 27897883Sgibbs case BIO_READ: 27997883Sgibbs case BIO_GETATTR: 28097883Sgibbs if (cp->acr == 0) 281 return (EPERM); 282 break; 283 case BIO_WRITE: 284 case BIO_DELETE: 285 case BIO_FLUSH: 286 if (cp->acw == 0) 287 return (EPERM); 288 break; 289 default: 290 return (EPERM); 291 } 292 /* if provider is marked for error, don't disturb. */ 293 if (pp->error) 294 return (pp->error); 295 296 switch(bp->bio_cmd) { 297 case BIO_READ: 298 case BIO_WRITE: 299 case BIO_DELETE: 300 /* Zero sectorsize is a probably lack of media */ 301 if (pp->sectorsize == 0) 302 return (ENXIO); 303 /* Reject I/O not on sector boundary */ 304 if (bp->bio_offset % pp->sectorsize) 305 return (EINVAL); 306 /* Reject I/O not integral sector long */ 307 if (bp->bio_length % pp->sectorsize) 308 return (EINVAL); 309 /* Reject requests before or past the end of media. 
*/ 310 if (bp->bio_offset < 0) 311 return (EIO); 312 if (bp->bio_offset > pp->mediasize) 313 return (EIO); 314 break; 315 default: 316 break; 317 } 318 return (0); 319} 320 321void 322g_io_request(struct bio *bp, struct g_consumer *cp) 323{ 324 struct g_provider *pp; 325 326 KASSERT(cp != NULL, ("NULL cp in g_io_request")); 327 KASSERT(bp != NULL, ("NULL bp in g_io_request")); 328 pp = cp->provider; 329 KASSERT(pp != NULL, ("consumer not attached in g_io_request")); 330#ifdef DIAGNOSTIC 331 KASSERT(bp->bio_driver1 == NULL, 332 ("bio_driver1 used by the consumer (geom %s)", cp->geom->name)); 333 KASSERT(bp->bio_driver2 == NULL, 334 ("bio_driver2 used by the consumer (geom %s)", cp->geom->name)); 335 KASSERT(bp->bio_pflags == 0, 336 ("bio_pflags used by the consumer (geom %s)", cp->geom->name)); 337 /* 338 * Remember consumer's private fields, so we can detect if they were 339 * modified by the provider. 340 */ 341 bp->_bio_caller1 = bp->bio_caller1; 342 bp->_bio_caller2 = bp->bio_caller2; 343 bp->_bio_cflags = bp->bio_cflags; 344#endif 345 346 if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_GETATTR)) { 347 KASSERT(bp->bio_data != NULL, 348 ("NULL bp->data in g_io_request(cmd=%hhu)", bp->bio_cmd)); 349 } 350 if (bp->bio_cmd & (BIO_DELETE|BIO_FLUSH)) { 351 KASSERT(bp->bio_data == NULL, 352 ("non-NULL bp->data in g_io_request(cmd=%hhu)", 353 bp->bio_cmd)); 354 } 355 if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) { 356 KASSERT(bp->bio_offset % cp->provider->sectorsize == 0, 357 ("wrong offset %jd for sectorsize %u", 358 bp->bio_offset, cp->provider->sectorsize)); 359 KASSERT(bp->bio_length % cp->provider->sectorsize == 0, 360 ("wrong length %jd for sectorsize %u", 361 bp->bio_length, cp->provider->sectorsize)); 362 } 363 364 g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d", 365 bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd); 366 367 bp->bio_from = cp; 368 bp->bio_to = pp; 369 bp->bio_error = 0; 370 bp->bio_completed = 0; 371 372 KASSERT(!(bp->bio_flags 
& BIO_ONQUEUE), 373 ("Bio already on queue bp=%p", bp)); 374 bp->bio_flags |= BIO_ONQUEUE; 375 376 binuptime(&bp->bio_t0); 377 378 /* 379 * The statistics collection is lockless, as such, but we 380 * can not update one instance of the statistics from more 381 * than one thread at a time, so grab the lock first. 382 */ 383 g_bioq_lock(&g_bio_run_down); 384 if (g_collectstats & 1) 385 devstat_start_transaction(pp->stat, &bp->bio_t0); 386 if (g_collectstats & 2) 387 devstat_start_transaction(cp->stat, &bp->bio_t0); 388 389 pp->nstart++; 390 cp->nstart++; 391 TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue); 392 g_bio_run_down.bio_queue_length++; 393 g_bioq_unlock(&g_bio_run_down); 394 395 /* Pass it on down. */ 396 wakeup(&g_wait_down); 397} 398 399void 400g_io_deliver(struct bio *bp, int error) 401{ 402 struct g_consumer *cp; 403 struct g_provider *pp; 404 405 KASSERT(bp != NULL, ("NULL bp in g_io_deliver")); 406 pp = bp->bio_to; 407 KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver")); 408#ifdef DIAGNOSTIC 409 KASSERT(bp->bio_caller1 == bp->_bio_caller1, 410 ("bio_caller1 used by the provider %s", pp->name)); 411 KASSERT(bp->bio_caller2 == bp->_bio_caller2, 412 ("bio_caller2 used by the provider %s", pp->name)); 413 KASSERT(bp->bio_cflags == bp->_bio_cflags, 414 ("bio_cflags used by the provider %s", pp->name)); 415#endif 416 cp = bp->bio_from; 417 if (cp == NULL) { 418 bp->bio_error = error; 419 bp->bio_done(bp); 420 return; 421 } 422 KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver")); 423 KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver")); 424 KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0")); 425 KASSERT(bp->bio_completed <= bp->bio_length, 426 ("bio_completed can't be greater than bio_length")); 427 428 g_trace(G_T_BIO, 429"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd", 430 bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error, 431 (intmax_t)bp->bio_offset, 
(intmax_t)bp->bio_length); 432 433 KASSERT(!(bp->bio_flags & BIO_ONQUEUE), 434 ("Bio already on queue bp=%p", bp)); 435 436 /* 437 * XXX: next two doesn't belong here 438 */ 439 bp->bio_bcount = bp->bio_length; 440 bp->bio_resid = bp->bio_bcount - bp->bio_completed; 441 442 /* 443 * The statistics collection is lockless, as such, but we 444 * can not update one instance of the statistics from more 445 * than one thread at a time, so grab the lock first. 446 */ 447 g_bioq_lock(&g_bio_run_up); 448 if (g_collectstats & 1) 449 devstat_end_transaction_bio(pp->stat, bp); 450 if (g_collectstats & 2) 451 devstat_end_transaction_bio(cp->stat, bp); 452 453 cp->nend++; 454 pp->nend++; 455 if (error != ENOMEM) { 456 bp->bio_error = error; 457 TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue); 458 bp->bio_flags |= BIO_ONQUEUE; 459 g_bio_run_up.bio_queue_length++; 460 g_bioq_unlock(&g_bio_run_up); 461 wakeup(&g_wait_up); 462 return; 463 } 464 g_bioq_unlock(&g_bio_run_up); 465 466 if (bootverbose) 467 printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name); 468 bp->bio_children = 0; 469 bp->bio_inbed = 0; 470 g_io_request(bp, cp); 471 pace++; 472 return; 473} 474 475void 476g_io_schedule_down(struct thread *tp __unused) 477{ 478 struct bio *bp; 479 off_t excess; 480 int error; 481 482 for(;;) { 483 g_bioq_lock(&g_bio_run_down); 484 bp = g_bioq_first(&g_bio_run_down); 485 if (bp == NULL) { 486 CTR0(KTR_GEOM, "g_down going to sleep"); 487 msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock, 488 PRIBIO | PDROP, "-", hz/10); 489 continue; 490 } 491 CTR0(KTR_GEOM, "g_down has work to do"); 492 g_bioq_unlock(&g_bio_run_down); 493 if (pace > 0) { 494 CTR1(KTR_GEOM, "g_down pacing self (pace %d)", pace); 495 pause("g_down", hz/10); 496 pace--; 497 } 498 error = g_io_check(bp); 499 if (error) { 500 CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider " 501 "%s returned %d", bp, bp->bio_to->name, error); 502 g_io_deliver(bp, error); 503 continue; 504 } 505 CTR2(KTR_GEOM, "g_down 
processing bp %p provider %s", bp, 506 bp->bio_to->name); 507 switch (bp->bio_cmd) { 508 case BIO_READ: 509 case BIO_WRITE: 510 case BIO_DELETE: 511 /* Truncate requests to the end of providers media. */ 512 /* 513 * XXX: What if we truncate because of offset being 514 * bad, not length? 515 */ 516 excess = bp->bio_offset + bp->bio_length; 517 if (excess > bp->bio_to->mediasize) { 518 excess -= bp->bio_to->mediasize; 519 bp->bio_length -= excess; 520 if (excess > 0) 521 CTR3(KTR_GEOM, "g_down truncated bio " 522 "%p provider %s by %d", bp, 523 bp->bio_to->name, excess); 524 } 525 /* Deliver zero length transfers right here. */ 526 if (bp->bio_length == 0) { 527 g_io_deliver(bp, 0); 528 CTR2(KTR_GEOM, "g_down terminated 0-length " 529 "bp %p provider %s", bp, bp->bio_to->name); 530 continue; 531 } 532 break; 533 default: 534 break; 535 } 536 THREAD_NO_SLEEPING(); 537 CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld " 538 "len %ld", bp, bp->bio_to->name, bp->bio_offset, 539 bp->bio_length); 540 bp->bio_to->geom->start(bp); 541 THREAD_SLEEPING_OK(); 542 } 543} 544 545void 546bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg) 547{ 548 bp->bio_task = func; 549 bp->bio_task_arg = arg; 550 /* 551 * The taskqueue is actually just a second queue off the "up" 552 * queue, so we use the same lock. 
553 */ 554 g_bioq_lock(&g_bio_run_up); 555 KASSERT(!(bp->bio_flags & BIO_ONQUEUE), 556 ("Bio already on queue bp=%p target taskq", bp)); 557 bp->bio_flags |= BIO_ONQUEUE; 558 TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue); 559 g_bio_run_task.bio_queue_length++; 560 wakeup(&g_wait_up); 561 g_bioq_unlock(&g_bio_run_up); 562} 563 564 565void 566g_io_schedule_up(struct thread *tp __unused) 567{ 568 struct bio *bp; 569 for(;;) { 570 g_bioq_lock(&g_bio_run_up); 571 bp = g_bioq_first(&g_bio_run_task); 572 if (bp != NULL) { 573 g_bioq_unlock(&g_bio_run_up); 574 THREAD_NO_SLEEPING(); 575 CTR1(KTR_GEOM, "g_up processing task bp %p", bp); 576 bp->bio_task(bp->bio_task_arg); 577 THREAD_SLEEPING_OK(); 578 continue; 579 } 580 bp = g_bioq_first(&g_bio_run_up); 581 if (bp != NULL) { 582 g_bioq_unlock(&g_bio_run_up); 583 THREAD_NO_SLEEPING(); 584 CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off " 585 "%ld len %ld", bp, bp->bio_to->name, 586 bp->bio_offset, bp->bio_length); 587 biodone(bp); 588 THREAD_SLEEPING_OK(); 589 continue; 590 } 591 CTR0(KTR_GEOM, "g_up going to sleep"); 592 msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock, 593 PRIBIO | PDROP, "-", hz/10); 594 } 595} 596 597void * 598g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error) 599{ 600 struct bio *bp; 601 void *ptr; 602 int errorc; 603 604 KASSERT(length > 0 && length >= cp->provider->sectorsize && 605 length <= MAXPHYS, ("g_read_data(): invalid length %jd", 606 (intmax_t)length)); 607 608 bp = g_alloc_bio(); 609 bp->bio_cmd = BIO_READ; 610 bp->bio_done = NULL; 611 bp->bio_offset = offset; 612 bp->bio_length = length; 613 ptr = g_malloc(length, M_WAITOK); 614 bp->bio_data = ptr; 615 g_io_request(bp, cp); 616 errorc = biowait(bp, "gread"); 617 if (error != NULL) 618 *error = errorc; 619 g_destroy_bio(bp); 620 if (errorc) { 621 g_free(ptr); 622 ptr = NULL; 623 } 624 return (ptr); 625} 626 627int 628g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length) 629{ 630 
struct bio *bp; 631 int error; 632 633 KASSERT(length > 0 && length >= cp->provider->sectorsize && 634 length <= MAXPHYS, ("g_write_data(): invalid length %jd", 635 (intmax_t)length)); 636 637 bp = g_alloc_bio(); 638 bp->bio_cmd = BIO_WRITE; 639 bp->bio_done = NULL; 640 bp->bio_offset = offset; 641 bp->bio_length = length; 642 bp->bio_data = ptr; 643 g_io_request(bp, cp); 644 error = biowait(bp, "gwrite"); 645 g_destroy_bio(bp); 646 return (error); 647} 648 649int 650g_delete_data(struct g_consumer *cp, off_t offset, off_t length) 651{ 652 struct bio *bp; 653 int error; 654 655 KASSERT(length > 0 && length >= cp->provider->sectorsize && 656 length <= MAXPHYS, ("g_delete_data(): invalid length %jd", 657 (intmax_t)length)); 658 659 bp = g_alloc_bio(); 660 bp->bio_cmd = BIO_DELETE; 661 bp->bio_done = NULL; 662 bp->bio_offset = offset; 663 bp->bio_length = length; 664 bp->bio_data = NULL; 665 g_io_request(bp, cp); 666 error = biowait(bp, "gdelete"); 667 g_destroy_bio(bp); 668 return (error); 669} 670 671void 672g_print_bio(struct bio *bp) 673{ 674 const char *pname, *cmd = NULL; 675 676 if (bp->bio_to != NULL) 677 pname = bp->bio_to->name; 678 else 679 pname = "[unknown]"; 680 681 switch (bp->bio_cmd) { 682 case BIO_GETATTR: 683 cmd = "GETATTR"; 684 printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute); 685 return; 686 case BIO_FLUSH: 687 cmd = "FLUSH"; 688 printf("%s[%s]", pname, cmd); 689 return; 690 case BIO_READ: 691 cmd = "READ"; 692 case BIO_WRITE: 693 if (cmd == NULL) 694 cmd = "WRITE"; 695 case BIO_DELETE: 696 if (cmd == NULL) 697 cmd = "DELETE"; 698 printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd, 699 (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length); 700 return; 701 default: 702 cmd = "UNKNOWN"; 703 printf("%s[%s()]", pname, cmd); 704 return; 705 } 706 /* NOTREACHED */ 707} 708