/* geom_io.c revision 136399 */
1/*- 2 * Copyright (c) 2002 Poul-Henning Kamp 3 * Copyright (c) 2002 Networks Associates Technology, Inc. 4 * All rights reserved. 5 * 6 * This software was developed for the FreeBSD Project by Poul-Henning Kamp 7 * and NAI Labs, the Security Research Division of Network Associates, Inc. 8 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the 9 * DARPA CHATS research program. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. The names of the authors may not be used to endorse or promote 20 * products derived from this software without specific prior written 21 * permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/geom/geom_io.c 136399 2004-10-11 21:22:59Z ups $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>

/*
 * The three GEOM I/O queues: requests travelling "down" towards providers,
 * completions travelling "up" towards consumers, and a small task queue
 * that piggy-backs on the "up" queue's lock (see bio_taskqueue()).
 * They are drained by g_io_schedule_down() and g_io_schedule_up(), which
 * sleep on the externally-declared channels g_wait_down / g_wait_up.
 */
static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_run_task;

/*
 * Throttle counter: bumped by g_io_deliver() on an ENOMEM completion,
 * decremented by g_io_schedule_down(), which briefly pauses while it is
 * non-zero to let memory pressure subside.
 */
static u_int pace;
static uma_zone_t biozone;

#include <machine/atomic.h>

/* Acquire a bio queue's mutex. */
static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

/* Release a bio queue's mutex. */
static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
/* Currently unused: tear down a bio queue's mutex. */
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

/* Initialize a bio queue: empty list plus its protecting mutex. */
static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

/*
 * Dequeue and return the first bio on the queue, or NULL if empty.
 * Clears BIO_ONQUEUE and maintains the queue length counter.
 * Caller must hold the queue lock.
 */
static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

/*
 * Allocate a zeroed bio without sleeping.  May return NULL under
 * memory pressure; callers must check.
 */
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	return (bp);
}

/*
 * Allocate a zeroed bio, sleeping if necessary.  Never returns NULL.
 */
struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	return (bp);
}

/* Return a bio to the zone. */
void
g_destroy_bio(struct bio *bp)
{

	uma_zfree(biozone, bp);
}

/*
 * Clone a bio for passing down to the next layer: copies the command,
 * extent and data/attribute pointers, links the clone to its parent and
 * bumps the parent's child count.  Non-sleeping; returns NULL on
 * allocation failure (in which case bio_children is NOT incremented).
 */
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_attribute = bp->bio_attribute;
		bp->bio_children++;
	}
	return(bp2);
}

/*
 * One-time initialization of the GEOM I/O subsystem: the three queues
 * and the bio allocation zone.  Presumably called during GEOM startup --
 * the caller is not visible in this file.
 */
void
g_io_init()
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_run_task);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL,
	    NULL, NULL,
	    0, 0);
}

/*
 * Synchronous BIO_GETATTR: issue the request, wait for completion and
 * return the error.  On return *len holds bio_completed (the number of
 * bytes actually delivered into ptr).
 */
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

/*
 * Validate a request before it is handed to the provider's geom.
 * Returns 0 if the request may proceed, otherwise an errno to be
 * delivered to the originator.
 */
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters dont allow the operation */
	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* if provider is marked for error, don't disturb. */
	if (pp->error)
		return (pp->error);

	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize is a probably lack of media */
		if (pp->sectorsize == 0)
			return (ENXIO);
		/* Reject I/O not on sector boundary */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not integral sector long */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);
		break;
	default:
		break;
	}
	return (0);
}

/*
 * Enqueue a request on the "down" queue for the g_down thread.
 * Marks the bio BIO_ONQUEUE, stamps bio_t0, and starts devstat
 * transactions per the g_collectstats bits: bit 0 = provider stats,
 * bit 1 = consumer stats.  Bit 2 controls lock ordering only: when set,
 * the queue lock is taken BEFORE the devstat calls, otherwise after --
 * note the matching !(g_collectstats & 4) test below so the lock is
 * acquired exactly once either way.
 */
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	KASSERT(bp->bio_data != NULL, ("NULL bp->data in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));

	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	bp->bio_flags |= BIO_ONQUEUE;

	binuptime(&bp->bio_t0);
	if (g_collectstats & 4)
		g_bioq_lock(&g_bio_run_down);
	if (g_collectstats & 1)
		devstat_start_transaction(pp->stat, &bp->bio_t0);
	if (g_collectstats & 2)
		devstat_start_transaction(cp->stat, &bp->bio_t0);

	if (!(g_collectstats & 4))
		g_bioq_lock(&g_bio_run_down);
	pp->nstart++;
	cp->nstart++;
	TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
	g_bio_run_down.bio_queue_length++;
	g_bioq_unlock(&g_bio_run_down);

	/* Pass it on down. */
	wakeup(&g_wait_down);
}

/*
 * Complete a request: hand it back towards the originating consumer.
 * If the request never left a consumer (bio_from == NULL) the done
 * callback is invoked directly.  An ENOMEM completion is not delivered
 * at all; the request is reset and re-issued, and the pace counter is
 * bumped to throttle the down path.  Otherwise the bio is queued on the
 * "up" queue for the g_up thread.  Same g_collectstats bit semantics as
 * in g_io_request() above.
 */
void
g_io_deliver(struct bio *bp, int error)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		/* No consumer: deliver straight to the done method. */
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: next two doesn't belong here
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

	if (g_collectstats & 4)
		g_bioq_lock(&g_bio_run_up);
	if (g_collectstats & 1)
		devstat_end_transaction_bio(pp->stat, bp);
	if (g_collectstats & 2)
		devstat_end_transaction_bio(cp->stat, bp);
	if (!(g_collectstats & 4))
		g_bioq_lock(&g_bio_run_up);
	cp->nend++;
	pp->nend++;
	if (error != ENOMEM) {
		bp->bio_error = error;
		TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_up.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_up);
		wakeup(&g_wait_up);
		return;
	}
	g_bioq_unlock(&g_bio_run_up);

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	/* Reset child accounting and retry the request. */
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	g_io_request(bp, cp);
	pace++;
	return;
}

/*
 * Main loop of the g_down thread: drain the "down" queue, validate each
 * request with g_io_check(), clip transfers at end of media, and hand
 * the bio to the destination geom's start method.  While pace is
 * non-zero, sleep briefly before each request to ease memory pressure
 * (the msleep channel &error is a stack address nobody wakes, so this
 * is effectively a hz/10 timed sleep).  The WITNESS-only mymutex is
 * held across the start() call, apparently so WITNESS can flag geoms
 * that sleep or take inappropriate locks there -- TODO confirm.
 */
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	off_t excess;
	int error;
#ifdef WITNESS
	struct mtx mymutex;

	bzero(&mymutex, sizeof mymutex);
	mtx_init(&mymutex, "g_xdown", NULL, MTX_DEF);
#endif

	for(;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			/* PDROP: msleep releases the queue lock for us. */
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", hz/10);
			continue;
		}
		g_bioq_unlock(&g_bio_run_down);
		if (pace > 0) {
			msleep(&error, NULL, PRIBIO, "g_down", hz/10);
			pace--;
		}
		error = g_io_check(bp);
		if (error) {
			g_io_deliver(bp, error);
			continue;
		}
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		case BIO_DELETE:
			/* Truncate requests to the end of providers media. */
			excess = bp->bio_offset + bp->bio_length;
			if (excess > bp->bio_to->mediasize) {
				excess -= bp->bio_to->mediasize;
				bp->bio_length -= excess;
			}
			/* Deliver zero length transfers right here. */
			if (bp->bio_length == 0) {
				g_io_deliver(bp, 0);
				continue;
			}
			break;
		default:
			break;
		}
#ifdef WITNESS
		mtx_lock(&mymutex);
#endif
		bp->bio_to->geom->start(bp);
#ifdef WITNESS
		mtx_unlock(&mymutex);
#endif
	}
}

/*
 * Queue a task to be run by the g_up thread ahead of normal
 * completions (see g_io_schedule_up()).
 */
void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{
	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target taskq", bp));
	bp->bio_flags |= BIO_ONQUEUE;
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}


/*
 * Main loop of the g_up thread: tasks queued via bio_taskqueue() are
 * run first; otherwise completions from the "up" queue are finished
 * with biodone().  Both queues share the up-queue lock.  The
 * WITNESS-only mymutex serves the same lock-order-checking purpose as
 * in g_io_schedule_down() -- TODO confirm.
 */
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;
#ifdef WITNESS
	struct mtx mymutex;

	bzero(&mymutex, sizeof mymutex);
	mtx_init(&mymutex, "g_xup", NULL, MTX_DEF);
#endif
	for(;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
#ifdef WITNESS
			mtx_lock(&mymutex);
#endif
			bp->bio_task(bp->bio_task_arg);
#ifdef WITNESS
			mtx_unlock(&mymutex);
#endif
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
#ifdef WITNESS
			mtx_lock(&mymutex);
#endif
			biodone(bp);
#ifdef WITNESS
			mtx_unlock(&mymutex);
#endif
			continue;
		}
		/* PDROP: msleep releases the queue lock for us. */
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", hz/10);
	}
}

/*
 * Synchronous read helper: allocate a buffer, issue a BIO_READ and wait.
 * Returns the g_malloc'ed buffer (ownership passes to the caller, who
 * must g_free() it), or NULL on error; the error code is stored through
 * *error when that pointer is non-NULL.
 */
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

/*
 * Synchronous write helper: issue a BIO_WRITE from the caller's buffer
 * and wait for completion.  Returns the bio error code (0 on success).
 */
int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}

/*
 * Print a one-line human-readable description of a bio.  The READ,
 * WRITE and DELETE cases deliberately fall through, using the cmd ==
 * NULL guards to pick the right name while sharing one printf.
 */
void
g_print_bio(struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
		return;
	case BIO_READ:
		cmd = "READ";
		/* FALLTHROUGH */
	case BIO_WRITE:
		if (cmd == NULL)
			cmd = "WRITE";
		/* FALLTHROUGH */
	case BIO_DELETE:
		if (cmd == NULL)
			cmd = "DELETE";
		printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
		    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
		return;
	default:
		cmd = "UNKNOWN";
		printf("%s[%s()]", pname, cmd);
		return;
	}
	/* NOTREACHED */
}