/* geom_io.c - FreeBSD revision 93238 */
1/*- 2 * Copyright (c) 2002 Poul-Henning Kamp 3 * Copyright (c) 2002 Networks Associates Technology, Inc. 4 * All rights reserved. 5 * 6 * This software was developed for the FreeBSD Project by Poul-Henning Kamp 7 * and NAI Labs, the Security Research Division of Network Associates, Inc. 8 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the 9 * DARPA CHATS research program. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. The names of the authors may not be used to endorse or promote 20 * products derived from this software without specific prior written 21 * permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 * 35 * $FreeBSD: head/sys/geom/geom_io.c 93238 2002-03-26 19:16:37Z phk $ 36 */ 37 38 39#include <sys/param.h> 40#ifndef _KERNEL 41#include <stdio.h> 42#include <string.h> 43#include <stdlib.h> 44#include <signal.h> 45#include <err.h> 46#include <sched.h> 47#else 48#include <sys/systm.h> 49#include <sys/kernel.h> 50#include <sys/malloc.h> 51#include <sys/bio.h> 52#endif 53 54#include <sys/errno.h> 55#include <geom/geom.h> 56 57static struct g_bioq g_bio_run_down; 58static struct g_bioq g_bio_run_up; 59static struct g_bioq g_bio_idle; 60 61#include <machine/atomic.h> 62 63static void 64g_bioq_lock(struct g_bioq *bq) 65{ 66 67 mtx_lock(&bq->bio_queue_lock); 68} 69 70static void 71g_bioq_unlock(struct g_bioq *bq) 72{ 73 74 mtx_unlock(&bq->bio_queue_lock); 75} 76 77#if 0 78static void 79g_bioq_destroy(struct g_bioq *bq) 80{ 81 82 mtx_destroy(&bq->bio_queue_lock); 83} 84#endif 85 86static void 87g_bioq_init(struct g_bioq *bq) 88{ 89 90 TAILQ_INIT(&bq->bio_queue); 91 mtx_init(&bq->bio_queue_lock, "bio queue", MTX_DEF); 92} 93 94static struct bio * 95g_bioq_first(struct g_bioq *bq) 96{ 97 struct bio *bp; 98 99 g_bioq_lock(bq); 100 bp = TAILQ_FIRST(&bq->bio_queue); 101 if (bp != NULL) { 102 TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue); 103 bq->bio_queue_length--; 104 } 105 g_bioq_unlock(bq); 106 return (bp); 107} 108 109static void 110g_bioq_enqueue_tail(struct bio *bp, struct g_bioq *rq) 111{ 112 113 g_bioq_lock(rq); 114 TAILQ_INSERT_TAIL(&rq->bio_queue, bp, bio_queue); 115 rq->bio_queue_length++; 116 g_bioq_unlock(rq); 117} 118 119struct bio * 120g_new_bio(void) 121{ 122 struct bio *bp; 123 124 bp = g_bioq_first(&g_bio_idle); 125 if (bp == NULL) 126 bp = g_malloc(sizeof *bp, M_WAITOK | M_ZERO); 127 g_trace(G_T_BIO, "g_new_bio() = %p", bp); 128 return (bp); 129} 130 131void 132g_destroy_bio(struct bio *bp) 133{ 134 135 g_trace(G_T_BIO, "g_destroy_bio(%p)", bp); 136 bzero(bp, sizeof *bp); 137 g_bioq_enqueue_tail(bp, &g_bio_idle); 138} 139 140struct bio * 
141g_clone_bio(struct bio *bp) 142{ 143 struct bio *bp2; 144 145 bp2 = g_new_bio(); 146 bp2->bio_linkage = bp; 147 bp2->bio_cmd = bp->bio_cmd; 148 bp2->bio_length = bp->bio_length; 149 bp2->bio_offset = bp->bio_offset; 150 bp2->bio_data = bp->bio_data; 151 bp2->bio_attribute = bp->bio_attribute; 152 g_trace(G_T_BIO, "g_clone_bio(%p) = %p", bp, bp2); 153 return(bp2); 154} 155 156void 157g_io_init() 158{ 159 160 g_bioq_init(&g_bio_run_down); 161 g_bioq_init(&g_bio_run_up); 162 g_bioq_init(&g_bio_idle); 163} 164 165int 166g_io_setattr(char *attr, struct g_consumer *cp, int len, void *ptr, struct thread *tp __unused) 167{ 168 struct bio *bp; 169 int error; 170 171 g_trace(G_T_BIO, "bio_setattr(%s)", attr); 172 do { 173 bp = g_new_bio(); 174 bp->bio_cmd = BIO_SETATTR; 175 bp->bio_done = NULL; 176 bp->bio_attribute = attr; 177 bp->bio_length = len; 178 bp->bio_data = ptr; 179 g_io_request(bp, cp); 180 while ((bp->bio_flags & BIO_DONE) == 0) { 181 mtx_lock(&Giant); 182 tsleep(bp, 0, "setattr", hz / 10); 183 mtx_unlock(&Giant); 184 } 185 error = bp->bio_error; 186 g_destroy_bio(bp); 187 if (error == EBUSY) 188 tsleep(&error, 0, "setattr_busy", hz); 189 } while(error == EBUSY); 190 return (error); 191} 192 193 194int 195g_io_getattr(char *attr, struct g_consumer *cp, int *len, void *ptr, struct thread *tp __unused) 196{ 197 struct bio *bp; 198 int error; 199 200 g_trace(G_T_BIO, "bio_getattr(%s)", attr); 201 do { 202 bp = g_new_bio(); 203 bp->bio_cmd = BIO_GETATTR; 204 bp->bio_done = NULL; 205 bp->bio_attribute = attr; 206 bp->bio_length = *len; 207 bp->bio_data = ptr; 208 g_io_request(bp, cp); 209 while ((bp->bio_flags & BIO_DONE) == 0) { 210 mtx_lock(&Giant); 211 tsleep(bp, 0, "getattr", hz / 10); 212 mtx_unlock(&Giant); 213 } 214 *len = bp->bio_completed; 215 error = bp->bio_error; 216 g_destroy_bio(bp); 217 if (error == EBUSY) 218 tsleep(&error, 0, "getattr_busy", hz); 219 220 } while(error == EBUSY); 221 return (error); 222} 223 224void 225g_io_request(struct bio *bp, 
struct g_consumer *cp) 226{ 227 int error; 228 229 KASSERT(cp != NULL, ("bio_request on thin air")); 230 error = 0; 231 bp->bio_from = cp; 232 bp->bio_to = cp->provider; 233 234 /* begin_stats(&bp->stats); */ 235 236 atomic_add_int(&cp->biocount, 1); 237 if (bp->bio_to == NULL) 238 error = ENXIO; 239 if (!error) { 240 switch(bp->bio_cmd) { 241 case BIO_READ: 242 case BIO_GETATTR: 243 if (cp->acr == 0) 244 error = EPERM; 245 break; 246 case BIO_WRITE: 247 if (cp->acw == 0) 248 error = EPERM; 249 break; 250 case BIO_SETATTR: 251 case BIO_DELETE: 252 if ((cp->acw == 0) || (cp->ace == 0)) 253 error = EPERM; 254 break; 255 default: 256 error = EPERM; 257 break; 258 } 259 } 260 /* if provider is marked for error, don't disturb */ 261 if (!error) 262 error = bp->bio_to->error; 263 if (error) { 264 bp->bio_error = error; 265 /* finish_stats(&bp->stats); */ 266 267 g_trace(G_T_BIO, 268 "bio_request(%p) from %p(%s) to %p(%s) cmd %d error %d\n", 269 bp, bp->bio_from, bp->bio_from->geom->name, 270 bp->bio_to, bp->bio_to->name, bp->bio_cmd, bp->bio_error); 271 g_bioq_enqueue_tail(bp, &g_bio_run_up); 272 mtx_lock(&Giant); 273 wakeup(&g_wait_up); 274 mtx_unlock(&Giant); 275 } else { 276 g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d", 277 bp, bp->bio_from, bp->bio_from->geom->name, 278 bp->bio_to, bp->bio_to->name, bp->bio_cmd); 279 g_bioq_enqueue_tail(bp, &g_bio_run_down); 280 mtx_lock(&Giant); 281 wakeup(&g_wait_down); 282 mtx_unlock(&Giant); 283 } 284} 285 286void 287g_io_deliver(struct bio *bp) 288{ 289 290 g_trace(G_T_BIO, 291 "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d", 292 bp, bp->bio_from, bp->bio_from->geom->name, 293 bp->bio_to, bp->bio_to->name, bp->bio_cmd, bp->bio_error); 294 /* finish_stats(&bp->stats); */ 295 296 g_bioq_enqueue_tail(bp, &g_bio_run_up); 297 298 mtx_lock(&Giant); 299 wakeup(&g_wait_up); 300 mtx_unlock(&Giant); 301} 302 303void 304g_io_schedule_down(struct thread *tp __unused) 305{ 306 struct bio *bp; 307 308 for(;;) { 309 
bp = g_bioq_first(&g_bio_run_down); 310 if (bp == NULL) 311 break; 312 bp->bio_to->geom->start(bp); 313 } 314} 315 316void 317g_io_schedule_up(struct thread *tp __unused) 318{ 319 struct bio *bp; 320 struct g_consumer *cp; 321 322 for(;;) { 323 bp = g_bioq_first(&g_bio_run_up); 324 if (bp == NULL) 325 break; 326 327 cp = bp->bio_from; 328 329 bp->bio_flags |= BIO_DONE; 330 atomic_add_int(&cp->biocount, -1); 331 if (bp->bio_done != NULL) { 332 bp->bio_done(bp); 333 } else { 334 mtx_lock(&Giant); 335 wakeup(bp); 336 mtx_unlock(&Giant); 337 } 338 } 339} 340 341void * 342g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error) 343{ 344 struct bio *bp; 345 void *ptr; 346 int errorc; 347 348 do { 349 bp = g_new_bio(); 350 bp->bio_cmd = BIO_READ; 351 bp->bio_done = NULL; 352 bp->bio_offset = offset; 353 bp->bio_length = length; 354 ptr = g_malloc(length, M_WAITOK); 355 bp->bio_data = ptr; 356 g_io_request(bp, cp); 357 while ((bp->bio_flags & BIO_DONE) == 0) { 358 mtx_lock(&Giant); 359 tsleep(bp, 0, "g_read_data", hz / 10); 360 mtx_unlock(&Giant); 361 } 362 errorc = bp->bio_error; 363 if (error != NULL) 364 *error = errorc; 365 g_destroy_bio(bp); 366 if (errorc) { 367 g_free(ptr); 368 ptr = NULL; 369 } 370 if (errorc == EBUSY) 371 tsleep(&errorc, 0, "g_read_data_busy", hz); 372 } while (errorc == EBUSY); 373 return (ptr); 374} 375