/* geom_io.c — FreeBSD GEOM I/O request path, SVN revision 149576 */
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
34 */ 35 36#include <sys/cdefs.h> 37__FBSDID("$FreeBSD: head/sys/geom/geom_io.c 149576 2005-08-29 11:39:24Z pjd $"); 38 39#include <sys/param.h> 40#include <sys/systm.h> 41#include <sys/kernel.h> 42#include <sys/malloc.h> 43#include <sys/bio.h> 44#include <sys/ktr.h> 45#include <sys/stack.h> 46 47#include <sys/errno.h> 48#include <geom/geom.h> 49#include <geom/geom_int.h> 50#include <sys/devicestat.h> 51 52#include <vm/uma.h> 53 54static struct g_bioq g_bio_run_down; 55static struct g_bioq g_bio_run_up; 56static struct g_bioq g_bio_run_task; 57 58static u_int pace; 59static uma_zone_t biozone; 60 61#include <machine/atomic.h> 62 63static void 64g_bioq_lock(struct g_bioq *bq) 65{ 66 67 mtx_lock(&bq->bio_queue_lock); 68} 69 70static void 71g_bioq_unlock(struct g_bioq *bq) 72{ 73 74 mtx_unlock(&bq->bio_queue_lock); 75} 76 77#if 0 78static void 79g_bioq_destroy(struct g_bioq *bq) 80{ 81 82 mtx_destroy(&bq->bio_queue_lock); 83} 84#endif 85 86static void 87g_bioq_init(struct g_bioq *bq) 88{ 89 90 TAILQ_INIT(&bq->bio_queue); 91 mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF); 92} 93 94static struct bio * 95g_bioq_first(struct g_bioq *bq) 96{ 97 struct bio *bp; 98 99 bp = TAILQ_FIRST(&bq->bio_queue); 100 if (bp != NULL) { 101 KASSERT((bp->bio_flags & BIO_ONQUEUE), 102 ("Bio not on queue bp=%p target %p", bp, bq)); 103 bp->bio_flags &= ~BIO_ONQUEUE; 104 TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue); 105 bq->bio_queue_length--; 106 } 107 return (bp); 108} 109 110struct bio * 111g_new_bio(void) 112{ 113 struct bio *bp; 114 115 bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO); 116#ifdef KTR 117 if (KTR_COMPILE & KTR_GEOM) { 118 struct stack st; 119 120 CTR1(KTR_GEOM, "g_new_bio(): %p", bp); 121 stack_save(&st); 122 CTRSTACK(KTR_GEOM, &st, 3, 0); 123 } 124#endif 125 return (bp); 126} 127 128struct bio * 129g_alloc_bio(void) 130{ 131 struct bio *bp; 132 133 bp = uma_zalloc(biozone, M_WAITOK | M_ZERO); 134#ifdef KTR 135 if (KTR_COMPILE & KTR_GEOM) { 136 struct stack st; 137 
138 CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp); 139 stack_save(&st); 140 CTRSTACK(KTR_GEOM, &st, 3, 0); 141 } 142#endif 143 return (bp); 144} 145 146void 147g_destroy_bio(struct bio *bp) 148{ 149#ifdef KTR 150 if (KTR_COMPILE & KTR_GEOM) { 151 struct stack st; 152 153 CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp); 154 stack_save(&st); 155 CTRSTACK(KTR_GEOM, &st, 3, 0); 156 } 157#endif 158 uma_zfree(biozone, bp); 159} 160 161struct bio * 162g_clone_bio(struct bio *bp) 163{ 164 struct bio *bp2; 165 166 bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO); 167 if (bp2 != NULL) { 168 bp2->bio_parent = bp; 169 bp2->bio_cmd = bp->bio_cmd; 170 bp2->bio_length = bp->bio_length; 171 bp2->bio_offset = bp->bio_offset; 172 bp2->bio_data = bp->bio_data; 173 bp2->bio_attribute = bp->bio_attribute; 174 bp->bio_children++; 175 } 176#ifdef KTR 177 if (KTR_COMPILE & KTR_GEOM) { 178 struct stack st; 179 180 CTR2(KTR_GEOM, "g_close_bio(%p): %p", bp, bp2); 181 stack_save(&st); 182 CTRSTACK(KTR_GEOM, &st, 3, 0); 183 } 184#endif 185 return(bp2); 186} 187 188void 189g_io_init() 190{ 191 192 g_bioq_init(&g_bio_run_down); 193 g_bioq_init(&g_bio_run_up); 194 g_bioq_init(&g_bio_run_task); 195 biozone = uma_zcreate("g_bio", sizeof (struct bio), 196 NULL, NULL, 197 NULL, NULL, 198 0, 0); 199} 200 201int 202g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr) 203{ 204 struct bio *bp; 205 int error; 206 207 g_trace(G_T_BIO, "bio_getattr(%s)", attr); 208 bp = g_alloc_bio(); 209 bp->bio_cmd = BIO_GETATTR; 210 bp->bio_done = NULL; 211 bp->bio_attribute = attr; 212 bp->bio_length = *len; 213 bp->bio_data = ptr; 214 g_io_request(bp, cp); 215 error = biowait(bp, "ggetattr"); 216 *len = bp->bio_completed; 217 g_destroy_bio(bp); 218 return (error); 219} 220 221static int 222g_io_check(struct bio *bp) 223{ 224 struct g_consumer *cp; 225 struct g_provider *pp; 226 227 cp = bp->bio_from; 228 pp = bp->bio_to; 229 230 /* Fail if access counters dont allow the operation */ 231 switch(bp->bio_cmd) { 232 
case BIO_READ: 233 case BIO_GETATTR: 234 if (cp->acr == 0) 235 return (EPERM); 236 break; 237 case BIO_WRITE: 238 case BIO_DELETE: 239 if (cp->acw == 0) 240 return (EPERM); 241 break; 242 default: 243 return (EPERM); 244 } 245 /* if provider is marked for error, don't disturb. */ 246 if (pp->error) 247 return (pp->error); 248 249 switch(bp->bio_cmd) { 250 case BIO_READ: 251 case BIO_WRITE: 252 case BIO_DELETE: 253 /* Zero sectorsize is a probably lack of media */ 254 if (pp->sectorsize == 0) 255 return (ENXIO); 256 /* Reject I/O not on sector boundary */ 257 if (bp->bio_offset % pp->sectorsize) 258 return (EINVAL); 259 /* Reject I/O not integral sector long */ 260 if (bp->bio_length % pp->sectorsize) 261 return (EINVAL); 262 /* Reject requests before or past the end of media. */ 263 if (bp->bio_offset < 0) 264 return (EIO); 265 if (bp->bio_offset > pp->mediasize) 266 return (EIO); 267 break; 268 default: 269 break; 270 } 271 return (0); 272} 273 274void 275g_io_request(struct bio *bp, struct g_consumer *cp) 276{ 277 struct g_provider *pp; 278 279 KASSERT(cp != NULL, ("NULL cp in g_io_request")); 280 KASSERT(bp != NULL, ("NULL bp in g_io_request")); 281 KASSERT(bp->bio_data != NULL, ("NULL bp->data in g_io_request")); 282 pp = cp->provider; 283 KASSERT(pp != NULL, ("consumer not attached in g_io_request")); 284 285 if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) { 286 KASSERT(bp->bio_offset % cp->provider->sectorsize == 0, 287 ("wrong offset %jd for sectorsize %u", 288 bp->bio_offset, cp->provider->sectorsize)); 289 KASSERT(bp->bio_length % cp->provider->sectorsize == 0, 290 ("wrong length %jd for sectorsize %u", 291 bp->bio_length, cp->provider->sectorsize)); 292 } 293 294 g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d", 295 bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd); 296 297 bp->bio_from = cp; 298 bp->bio_to = pp; 299 bp->bio_error = 0; 300 bp->bio_completed = 0; 301 302 KASSERT(!(bp->bio_flags & BIO_ONQUEUE), 303 ("Bio already on queue 
bp=%p", bp)); 304 bp->bio_flags |= BIO_ONQUEUE; 305 306 binuptime(&bp->bio_t0); 307 308 /* 309 * The statistics collection is lockless, as such, but we 310 * can not update one instance of the statistics from more 311 * than one thread at a time, so grab the lock first. 312 */ 313 g_bioq_lock(&g_bio_run_down); 314 if (g_collectstats & 1) 315 devstat_start_transaction(pp->stat, &bp->bio_t0); 316 if (g_collectstats & 2) 317 devstat_start_transaction(cp->stat, &bp->bio_t0); 318 319 pp->nstart++; 320 cp->nstart++; 321 TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue); 322 g_bio_run_down.bio_queue_length++; 323 g_bioq_unlock(&g_bio_run_down); 324 325 /* Pass it on down. */ 326 wakeup(&g_wait_down); 327} 328 329void 330g_io_deliver(struct bio *bp, int error) 331{ 332 struct g_consumer *cp; 333 struct g_provider *pp; 334 335 KASSERT(bp != NULL, ("NULL bp in g_io_deliver")); 336 pp = bp->bio_to; 337 KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver")); 338 cp = bp->bio_from; 339 if (cp == NULL) { 340 bp->bio_error = error; 341 bp->bio_done(bp); 342 return; 343 } 344 KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver")); 345 KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver")); 346 KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0")); 347 KASSERT(bp->bio_completed <= bp->bio_length, 348 ("bio_completed can't be greater than bio_length")); 349 350 g_trace(G_T_BIO, 351"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd", 352 bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error, 353 (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length); 354 355 KASSERT(!(bp->bio_flags & BIO_ONQUEUE), 356 ("Bio already on queue bp=%p", bp)); 357 358 /* 359 * XXX: next two doesn't belong here 360 */ 361 bp->bio_bcount = bp->bio_length; 362 bp->bio_resid = bp->bio_bcount - bp->bio_completed; 363 364 /* 365 * The statistics collection is lockless, as such, but we 366 * can not update one instance of the statistics from more 367 * 
than one thread at a time, so grab the lock first. 368 */ 369 g_bioq_lock(&g_bio_run_up); 370 if (g_collectstats & 1) 371 devstat_end_transaction_bio(pp->stat, bp); 372 if (g_collectstats & 2) 373 devstat_end_transaction_bio(cp->stat, bp); 374 375 cp->nend++; 376 pp->nend++; 377 if (error != ENOMEM) { 378 bp->bio_error = error; 379 TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue); 380 bp->bio_flags |= BIO_ONQUEUE; 381 g_bio_run_up.bio_queue_length++; 382 g_bioq_unlock(&g_bio_run_up); 383 wakeup(&g_wait_up); 384 return; 385 } 386 g_bioq_unlock(&g_bio_run_up); 387 388 if (bootverbose) 389 printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name); 390 bp->bio_children = 0; 391 bp->bio_inbed = 0; 392 g_io_request(bp, cp); 393 pace++; 394 return; 395} 396 397void 398g_io_schedule_down(struct thread *tp __unused) 399{ 400 struct bio *bp; 401 off_t excess; 402 int error; 403#ifdef WITNESS 404 struct mtx mymutex; 405 406 bzero(&mymutex, sizeof mymutex); 407 mtx_init(&mymutex, "g_xdown", NULL, MTX_DEF); 408#endif 409 410 for(;;) { 411 g_bioq_lock(&g_bio_run_down); 412 bp = g_bioq_first(&g_bio_run_down); 413 if (bp == NULL) { 414 CTR0(KTR_GEOM, "g_down going to sleep"); 415 msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock, 416 PRIBIO | PDROP, "-", hz/10); 417 continue; 418 } 419 CTR0(KTR_GEOM, "g_down has work to do"); 420 g_bioq_unlock(&g_bio_run_down); 421 if (pace > 0) { 422 CTR1(KTR_GEOM, "g_down pacing self (pace %d)", pace); 423 msleep(&error, NULL, PRIBIO, "g_down", hz/10); 424 pace--; 425 } 426 error = g_io_check(bp); 427 if (error) { 428 CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider " 429 "%s returned %d", bp, bp->bio_to->name, error); 430 g_io_deliver(bp, error); 431 continue; 432 } 433 CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp, 434 bp->bio_to->name); 435 switch (bp->bio_cmd) { 436 case BIO_READ: 437 case BIO_WRITE: 438 case BIO_DELETE: 439 /* Truncate requests to the end of providers media. 
*/ 440 /* 441 * XXX: What if we truncate because of offset being 442 * bad, not length? 443 */ 444 excess = bp->bio_offset + bp->bio_length; 445 if (excess > bp->bio_to->mediasize) { 446 excess -= bp->bio_to->mediasize; 447 bp->bio_length -= excess; 448 if (excess > 0) 449 CTR3(KTR_GEOM, "g_down truncated bio " 450 "%p provider %s by %d", bp, 451 bp->bio_to->name, excess); 452 } 453 /* Deliver zero length transfers right here. */ 454 if (bp->bio_length == 0) { 455 g_io_deliver(bp, 0); 456 CTR2(KTR_GEOM, "g_down terminated 0-length " 457 "bp %p provider %s", bp, bp->bio_to->name); 458 continue; 459 } 460 break; 461 default: 462 break; 463 } 464#ifdef WITNESS 465 mtx_lock(&mymutex); 466#endif 467 CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld " 468 "len %ld", bp, bp->bio_to->name, bp->bio_offset, 469 bp->bio_length); 470 bp->bio_to->geom->start(bp); 471#ifdef WITNESS 472 mtx_unlock(&mymutex); 473#endif 474 } 475} 476 477void 478bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg) 479{ 480 bp->bio_task = func; 481 bp->bio_task_arg = arg; 482 /* 483 * The taskqueue is actually just a second queue off the "up" 484 * queue, so we use the same lock. 
485 */ 486 g_bioq_lock(&g_bio_run_up); 487 KASSERT(!(bp->bio_flags & BIO_ONQUEUE), 488 ("Bio already on queue bp=%p target taskq", bp)); 489 bp->bio_flags |= BIO_ONQUEUE; 490 TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue); 491 g_bio_run_task.bio_queue_length++; 492 wakeup(&g_wait_up); 493 g_bioq_unlock(&g_bio_run_up); 494} 495 496 497void 498g_io_schedule_up(struct thread *tp __unused) 499{ 500 struct bio *bp; 501#ifdef WITNESS 502 struct mtx mymutex; 503 504 bzero(&mymutex, sizeof mymutex); 505 mtx_init(&mymutex, "g_xup", NULL, MTX_DEF); 506#endif 507 for(;;) { 508 g_bioq_lock(&g_bio_run_up); 509 bp = g_bioq_first(&g_bio_run_task); 510 if (bp != NULL) { 511 g_bioq_unlock(&g_bio_run_up); 512#ifdef WITNESS 513 mtx_lock(&mymutex); 514#endif 515 CTR1(KTR_GEOM, "g_up processing task bp %p", bp); 516 bp->bio_task(bp->bio_task_arg); 517#ifdef WITNESS 518 mtx_unlock(&mymutex); 519#endif 520 continue; 521 } 522 bp = g_bioq_first(&g_bio_run_up); 523 if (bp != NULL) { 524 g_bioq_unlock(&g_bio_run_up); 525#ifdef WITNESS 526 mtx_lock(&mymutex); 527#endif 528 CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off " 529 "%ld len %ld", bp, bp->bio_to->name, 530 bp->bio_offset, bp->bio_length); 531 biodone(bp); 532#ifdef WITNESS 533 mtx_unlock(&mymutex); 534#endif 535 continue; 536 } 537 CTR0(KTR_GEOM, "g_up going to sleep"); 538 msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock, 539 PRIBIO | PDROP, "-", hz/10); 540 } 541} 542 543void * 544g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error) 545{ 546 struct bio *bp; 547 void *ptr; 548 int errorc; 549 550 KASSERT(length > 0 && length >= cp->provider->sectorsize && 551 length <= MAXPHYS, ("g_read_data(): invalid length %jd", 552 (intmax_t)length)); 553 554 bp = g_alloc_bio(); 555 bp->bio_cmd = BIO_READ; 556 bp->bio_done = NULL; 557 bp->bio_offset = offset; 558 bp->bio_length = length; 559 ptr = g_malloc(length, M_WAITOK); 560 bp->bio_data = ptr; 561 g_io_request(bp, cp); 562 errorc = biowait(bp, 
"gread"); 563 if (error != NULL) 564 *error = errorc; 565 g_destroy_bio(bp); 566 if (errorc) { 567 g_free(ptr); 568 ptr = NULL; 569 } 570 return (ptr); 571} 572 573int 574g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length) 575{ 576 struct bio *bp; 577 int error; 578 579 KASSERT(length > 0 && length >= cp->provider->sectorsize && 580 length <= MAXPHYS, ("g_write_data(): invalid length %jd", 581 (intmax_t)length)); 582 583 bp = g_alloc_bio(); 584 bp->bio_cmd = BIO_WRITE; 585 bp->bio_done = NULL; 586 bp->bio_offset = offset; 587 bp->bio_length = length; 588 bp->bio_data = ptr; 589 g_io_request(bp, cp); 590 error = biowait(bp, "gwrite"); 591 g_destroy_bio(bp); 592 return (error); 593} 594 595void 596g_print_bio(struct bio *bp) 597{ 598 const char *pname, *cmd = NULL; 599 600 if (bp->bio_to != NULL) 601 pname = bp->bio_to->name; 602 else 603 pname = "[unknown]"; 604 605 switch (bp->bio_cmd) { 606 case BIO_GETATTR: 607 cmd = "GETATTR"; 608 printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute); 609 return; 610 case BIO_READ: 611 cmd = "READ"; 612 case BIO_WRITE: 613 if (cmd == NULL) 614 cmd = "WRITE"; 615 case BIO_DELETE: 616 if (cmd == NULL) 617 cmd = "DELETE"; 618 printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd, 619 (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length); 620 return; 621 default: 622 cmd = "UNKNOWN"; 623 printf("%s[%s()]", pname, cmd); 624 return; 625 } 626 /* NOTREACHED */ 627} 628