/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/geom/geom_io.c 135873 2004-09-28 08:34:27Z pjd $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>

/*
 * GEOM I/O path: bios travel "down" (requests from consumers toward
 * providers) and "up" (completions from providers back to consumers)
 * through the three queues below, serviced by the g_down/g_up kernel
 * threads via g_io_schedule_down()/g_io_schedule_up().
 */
static struct g_bioq g_bio_run_down;	/* requests awaiting the g_down thread */
static struct g_bioq g_bio_run_up;	/* completions awaiting the g_up thread */
static struct g_bioq g_bio_run_task;	/* deferred tasks; shares g_bio_run_up's lock */

/*
 * Non-zero while we are backing off after an ENOMEM completion; each
 * pending unit makes g_down pause briefly before issuing the next request.
 * NOTE(review): pace is read/written from both g_io_deliver() callers and
 * the g_down thread without a lock or atomics — presumably a benign race
 * since it only affects pacing; confirm before relying on exact counts.
 */
static u_int pace;
static uma_zone_t biozone;		/* allocation zone for struct bio */

#include <machine/atomic.h>

/* Lock one of the bio queues. */
static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

/* Unlock one of the bio queues. */
static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

/* Initialize a bio queue: empty list plus its mutex. */
static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

/*
 * Dequeue and return the first bio on the queue, or NULL if empty.
 * Clears BIO_ONQUEUE and maintains the queue length counter.
 * NOTE(review): does not take the queue lock itself — every caller in
 * this file holds bq->bio_queue_lock around the call; keep it that way.
 */
static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

/*
 * Append a bio to the tail of a queue, setting BIO_ONQUEUE.  Unlike
 * g_bioq_first(), this takes the queue lock itself.
 */
static void
g_bioq_enqueue_tail(struct bio *bp, struct g_bioq *rq)
{

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target %p", bp, rq));
	bp->bio_flags |= BIO_ONQUEUE;
	g_bioq_lock(rq);
	TAILQ_INSERT_TAIL(&rq->bio_queue, bp, bio_queue);
	rq->bio_queue_length++;
	g_bioq_unlock(rq);
}

/*
 * Allocate a zeroed bio without sleeping.  May return NULL if the
 * zone cannot satisfy the request right now.
 */
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	return (bp);
}

/*
 * Allocate a zeroed bio, sleeping if necessary.  Never returns NULL.
 */
struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	return (bp);
}

/* Return a bio to the allocation zone. */
void
g_destroy_bio(struct bio *bp)
{

	uma_zfree(biozone, bp);
}

/*
 * Clone a bio for passing a request one layer further down the stack.
 * Copies the command, range, data pointer and attribute; links the clone
 * to its parent and bumps the parent's child count.  bio_done is
 * deliberately not copied — completion of the child is presumably routed
 * back to the parent via bio_parent (confirm against biodone()).
 * Returns NULL if allocation fails (M_NOWAIT).
 */
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_attribute = bp->bio_attribute;
		bp->bio_children++;
	}
	return(bp2);
}

/*
 * One-time setup of the three I/O queues and the bio zone.
 * Presumably called once during GEOM initialization — no locking here.
 */
void
g_io_init()
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_run_task);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL,
	    NULL, NULL,
	    0, 0);
}

/*
 * Synchronously issue a BIO_GETATTR request and wait for the answer.
 * On entry *len is the size of the buffer at ptr; on return it holds
 * the number of bytes actually completed.  Returns the bio error code.
 */
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

/*
 * Sanity-check a request before it is handed to the provider's geom:
 * verify the consumer's access counts permit the operation, that the
 * provider is not in an error state, and (for data transfers) that the
 * range is sector-aligned and not before the start of the media.
 * Returns 0 if the request may proceed, otherwise an errno.
 */
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters don't allow the operation. */
	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If provider is marked for error, don't disturb. */
	if (pp->error)
		return (pp->error);

	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize probably means no media present. */
		if (pp->sectorsize == 0)
			return (ENXIO);
		/* Reject I/O not on sector boundary. */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not an integral number of sectors long. */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/*
		 * Reject requests before or past the end of media.
		 * (offset == mediasize is allowed here; the scheduler
		 * truncates it to a zero-length transfer.)
		 */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);
		break;
	default:
		break;
	}
	return (0);
}

/*
 * Dispatch a bio from a consumer down toward its provider: record the
 * endpoints, start per-provider/per-consumer devstat transactions as
 * configured by g_collectstats, enqueue on the "down" queue and wake
 * the g_down thread.  The actual validation and delivery to the
 * provider's start routine happen in g_io_schedule_down().
 */
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	KASSERT(bp->bio_data != NULL, ("NULL bp->data in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));

	/* Data transfers must be sector-aligned; assert early and loudly. */
	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	/* g_collectstats bit 0: provider stats; bit 1: consumer stats. */
	if (g_collectstats & 1)
		devstat_start_transaction_bio(pp->stat, bp);
	pp->nstart++;
	if (g_collectstats & 2)
		devstat_start_transaction_bio(cp->stat, bp);
	cp->nstart++;

	/* Pass it on down. */
	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);
	g_bioq_enqueue_tail(bp, &g_bio_run_down);
	wakeup(&g_wait_down);
}

/*
 * Complete a bio on behalf of a provider: close out devstat accounting,
 * then hand the bio to the "up" queue for the g_up thread to finish via
 * biodone().  A bio with no consumer (bio_from == NULL) is completed
 * directly through its bio_done callback.  An ENOMEM completion is not
 * reported to the caller at all — the request is reset and re-issued,
 * and the pace counter is bumped so g_down backs off briefly.
 */
void
g_io_deliver(struct bio *bp, int error)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	/* No consumer: complete in place, bypassing the up queue. */
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	/*
	 * XXX: next two doesn't belong here
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;
	if (g_collectstats & 1)
		devstat_end_transaction_bio(pp->stat, bp);
	if (g_collectstats & 2)
		devstat_end_transaction_bio(cp->stat, bp);
	cp->nend++;
	pp->nend++;

	if (error == ENOMEM) {
		if (bootverbose)
			printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
		/* Reset child bookkeeping and retry the request as new. */
		bp->bio_children = 0;
		bp->bio_inbed = 0;
		g_io_request(bp, cp);
		/* NOTE(review): unlocked increment — see comment at 'pace'. */
		pace++;
		return;
	}
	bp->bio_error = error;
	g_bioq_enqueue_tail(bp, &g_bio_run_up);
	wakeup(&g_wait_up);
}

/*
 * Main loop of the g_down thread: pull requests off the down queue,
 * validate them with g_io_check(), truncate transfers at end-of-media,
 * and hand them to the provider geom's start routine.  Sleeps on
 * g_wait_down (with a timeout, dropping the queue lock via PDROP) when
 * the queue is empty.
 */
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	off_t excess;
	int error;
#ifdef WITNESS
	/*
	 * Dummy mutex held only around the provider's start routine so
	 * WITNESS records lock ordering (and sleeping) violations there.
	 */
	struct mtx mymutex;

	bzero(&mymutex, sizeof mymutex);
	mtx_init(&mymutex, "g_xdown", NULL, MTX_DEF);
#endif

	for(;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			/* PDROP releases the queue lock while we sleep. */
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", hz/10);
			continue;
		}
		g_bioq_unlock(&g_bio_run_down);
		if (pace > 0) {
			/*
			 * ENOMEM back-off: a pure timed pause.
			 * NOTE(review): &error is a stack address nothing
			 * ever wakes, so this always sleeps the full
			 * hz/10; pace-- is also unlocked (see 'pace').
			 */
			msleep(&error, NULL, PRIBIO, "g_down", hz/10);
			pace--;
		}
		error = g_io_check(bp);
		if (error) {
			g_io_deliver(bp, error);
			continue;
		}
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		case BIO_DELETE:
			/* Truncate requests to the end of providers media. */
			excess = bp->bio_offset + bp->bio_length;
			if (excess > bp->bio_to->mediasize) {
				excess -= bp->bio_to->mediasize;
				bp->bio_length -= excess;
			}
			/* Deliver zero length transfers right here. */
			if (bp->bio_length == 0) {
				g_io_deliver(bp, 0);
				continue;
			}
			break;
		default:
			break;
		}
#ifdef WITNESS
		mtx_lock(&mymutex);
#endif
		bp->bio_to->geom->start(bp);
#ifdef WITNESS
		mtx_unlock(&mymutex);
#endif
	}
}

/*
 * Queue a deferred task to be run by the g_up thread ahead of normal
 * completions.  The task callback and its argument ride in the bio.
 */
void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{
	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target taskq", bp));
	bp->bio_flags |= BIO_ONQUEUE;
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}


/*
 * Main loop of the g_up thread: drain the task queue first (both
 * queues share g_bio_run_up's lock), then complete bios from the up
 * queue via biodone().  Sleeps on g_wait_up with a timeout when both
 * queues are empty.  The WITNESS dummy mutex brackets the callbacks,
 * as in g_io_schedule_down().
 */
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;
#ifdef WITNESS
	struct mtx mymutex;

	bzero(&mymutex, sizeof mymutex);
	mtx_init(&mymutex, "g_xup", NULL, MTX_DEF);
#endif
	for(;;) {
		g_bioq_lock(&g_bio_run_up);
		/* Tasks take priority over ordinary completions. */
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
#ifdef WITNESS
			mtx_lock(&mymutex);
#endif
			bp->bio_task(bp->bio_task_arg);
#ifdef WITNESS
			mtx_unlock(&mymutex);
#endif
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
#ifdef WITNESS
			mtx_lock(&mymutex);
#endif
			biodone(bp);
#ifdef WITNESS
			mtx_unlock(&mymutex);
#endif
			continue;
		}
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", hz/10);
	}
}

/*
 * Synchronous read helper: allocate a buffer, issue a BIO_READ for
 * [offset, offset+length) and wait for it.  Returns the buffer on
 * success (caller frees with g_free()), or NULL on error; the errno is
 * stored through 'error' if it is non-NULL.  Length must be at least
 * one sector and at most MAXPHYS.
 */
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

/*
 * Synchronous write helper: issue a BIO_WRITE of 'length' bytes from
 * ptr at 'offset' and wait for completion.  Returns the bio error
 * code.  Length must be at least one sector and at most MAXPHYS.
 */
int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}

/*
 * Print a one-line human-readable description of a bio (provider name,
 * command, and the command-specific details) for debugging.
 */
void
g_print_bio(struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
		return;
	case BIO_READ:
		cmd = "READ";
		/* FALLTHROUGH */
	case BIO_WRITE:
		if (cmd == NULL)
			cmd = "WRITE";
		/* FALLTHROUGH */
	case BIO_DELETE:
		if (cmd == NULL)
			cmd = "DELETE";
		printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
		    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
		return;
	default:
		cmd = "UNKNOWN";
		printf("%s[%s()]", pname, cmd);
		return;
	}
	/* NOTREACHED */
}