/* geom_io.c — FreeBSD revision 104195 (see $FreeBSD$ tag in the license block below). */
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/geom/geom_io.c 104195 2002-09-30 08:54:46Z phk $
 */


#include <sys/param.h>
#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <signal.h>
#include <err.h>
#include <sched.h>
#else
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#endif

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

/*
 * The three bio queues of the GEOM I/O path:
 *   g_bio_run_down - requests travelling from consumers toward providers
 *   g_bio_run_up   - completed requests travelling back up to consumers
 *   g_bio_idle     - recycled, zeroed struct bio's available for reuse
 */
static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_idle;

#include <machine/atomic.h>

/* Acquire the mutex protecting a bio queue. */
static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

/* Release the mutex protecting a bio queue. */
static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
/* Tear down a bio queue's mutex.  Currently unused (compiled out). */
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

/* Initialize a bio queue: empty TAILQ plus its protecting mutex. */
static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

/*
 * Dequeue and return the head of a bio queue, or NULL if it is empty.
 * Removal and the length bookkeeping happen under the queue lock.
 */
static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	g_bioq_lock(bq);
	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	g_bioq_unlock(bq);
	return (bp);
}

/* Append a bio to the tail of a queue, locked, updating the length count. */
static void
g_bioq_enqueue_tail(struct bio *bp, struct g_bioq *rq)
{

	g_bioq_lock(rq);
	TAILQ_INSERT_TAIL(&rq->bio_queue, bp, bio_queue);
	rq->bio_queue_length++;
	g_bioq_unlock(rq);
}

/*
 * Allocate a struct bio, preferring a recycled one from the idle queue.
 * Falls back to g_malloc(M_NOWAIT | M_ZERO), so this MAY RETURN NULL
 * under memory pressure; callers must be prepared for that.
 * (Idle-queue bios were zeroed by g_destroy_bio before being queued.)
 */
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = g_bioq_first(&g_bio_idle);
	if (bp == NULL)
		bp = g_malloc(sizeof *bp, M_NOWAIT | M_ZERO);
	g_trace(G_T_BIO, "g_new_bio() = %p", bp);
	return (bp);
}

/*
 * Retire a struct bio: zero it and park it on the idle queue for reuse
 * by g_new_bio().  The memory is never returned to the allocator here.
 */
void
g_destroy_bio(struct bio *bp)
{

	g_trace(G_T_BIO, "g_destroy_bio(%p)", bp);
	bzero(bp, sizeof *bp);
	g_bioq_enqueue_tail(bp, &g_bio_idle);
}
/*
 * Clone a bio for passing one layer further down the GEOM stack.
 * Copies the request parameters (cmd/length/offset/data/attribute) and
 * records the parent in bio_linkage.  Returns NULL if allocation fails
 * (g_new_bio() is M_NOWAIT); the caller must handle that.
 */
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = g_new_bio();
	if (bp2 != NULL) {
		bp2->bio_linkage = bp;
		bp2->bio_cmd = bp->bio_cmd;
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_attribute = bp->bio_attribute;
	}
	g_trace(G_T_BIO, "g_clone_bio(%p) = %p", bp, bp2);
	return(bp2);
}

/* One-time initialization of the three global bio queues and their locks. */
void
g_io_init()
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_idle);
}

/*
 * Synchronously issue a BIO_SETATTR request on a consumer and wait for
 * completion.  Returns the bio error code from biowait().
 *
 * NOTE(review): g_new_bio() can return NULL (M_NOWAIT allocation) and the
 * result is dereferenced unchecked here — verify against current callers.
 */
int
g_io_setattr(const char *attr, struct g_consumer *cp, int len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_setattr(%s)", attr);
	bp = g_new_bio();
	bp->bio_cmd = BIO_SETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gsetattr");
	g_destroy_bio(bp);
	return (error);
}


/*
 * Synchronously issue a BIO_GETATTR request and wait for completion.
 * On return *len holds bio_completed (presumably the number of attribute
 * bytes transferred — confirm against provider implementations).
 * An EBUSY completion is retried after sleeping ~1 second (hz ticks).
 *
 * NOTE(review): as in g_io_setattr(), the g_new_bio() result is used
 * without a NULL check.
 */
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	do {
		bp = g_new_bio();
		bp->bio_cmd = BIO_GETATTR;
		bp->bio_done = NULL;
		bp->bio_attribute = attr;
		bp->bio_length = *len;
		bp->bio_data = ptr;
		g_io_request(bp, cp);
		error = biowait(bp, "ggetattr");
		*len = bp->bio_completed;
		g_destroy_bio(bp);
		if (error == EBUSY)
			tsleep(&error, 0, "getattr_busy", hz);

	} while(error == EBUSY);
	return (error);
}

/*
 * Entry point for sending a bio down the stack from a consumer.
 * Validates the request against the consumer's attachment and access
 * counts (acr/acw), clips READ/WRITE/DELETE against the provider's
 * mediasize, then enqueues the bio on g_bio_run_down and wakes the
 * down-path scheduler.  Failures are reported asynchronously through
 * g_io_deliver(), never by a return value.
 *
 * The consumer's biocount is incremented before any of the failure
 * checks, so every path out of this function — g_io_deliver() on error
 * or enqueue on success — eventually leads to the up-path scheduler,
 * which performs the matching decrement.
 */
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	int error;
	off_t excess;

	KASSERT(cp != NULL, ("bio_request on thin air"));
	error = 0;
	bp->bio_from = cp;
	bp->bio_to = cp->provider;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	/* begin_stats(&bp->stats); */

	atomic_add_int(&cp->biocount, 1);
	/* Fail on unattached consumers */
	if (bp->bio_to == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}
	/* Fail if access doesn't allow operation */
	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		/* Reads and attribute reads require read access. */
		if (cp->acr == 0) {
			g_io_deliver(bp, EPERM);
			return;
		}
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		/* Writes and deletes require write access. */
		if (cp->acw == 0) {
			g_io_deliver(bp, EPERM);
			return;
		}
		break;
	case BIO_SETATTR:
		/* XXX: Should ideally check for (cp->ace == 0) */
		if ((cp->acw == 0)) {
			printf("setattr on %s mode (%d,%d,%d)\n",
			    cp->provider->name,
			    cp->acr, cp->acw, cp->ace);
			g_io_deliver(bp, EPERM);
			return;
		}
		break;
	default:
		/* Unknown command: reject. */
		g_io_deliver(bp, EPERM);
		return;
	}
	/* if provider is marked for error, don't disturb. */
	if (bp->bio_to->error) {
		g_io_deliver(bp, bp->bio_to->error);
		return;
	}
	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Reject requests past the end of media. */
		if (bp->bio_offset > bp->bio_to->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		/* Truncate requests to the end of providers media. */
		excess = bp->bio_offset + bp->bio_length;
		if (excess > bp->bio_to->mediasize) {
			excess -= bp->bio_to->mediasize;
			bp->bio_length -= excess;
		}
		/* Deliver zero length transfers right here. */
		if (bp->bio_length == 0) {
			g_io_deliver(bp, 0);
			return;
		}
		break;
	default:
		break;
	}
	/* Pass it on down. */
	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, bp->bio_from, bp->bio_from->geom->name,
	    bp->bio_to, bp->bio_to->name, bp->bio_cmd);
	g_bioq_enqueue_tail(bp, &g_bio_run_down);
	wakeup(&g_wait_down);
}

/*
 * Complete a bio: record the error code, queue it on the up-path queue
 * and wake the up-path scheduler, which will run biodone() and drop the
 * consumer's biocount reference taken in g_io_request().
 */
void
g_io_deliver(struct bio *bp, int error)
{

	g_trace(G_T_BIO,
	    "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d",
	    bp, bp->bio_from, bp->bio_from->geom->name,
	    bp->bio_to, bp->bio_to->name, bp->bio_cmd, error);
	/* finish_stats(&bp->stats); */

	bp->bio_error = error;

	g_bioq_enqueue_tail(bp, &g_bio_run_up);

	wakeup(&g_wait_up);
}

/*
 * Drain the down-path queue, handing each bio to its target geom's
 * start() method.  Returns when the queue is empty; presumably invoked
 * by the GEOM worker thread after a wakeup(&g_wait_down) — confirm in
 * the scheduler/thread setup code, which is outside this file.
 */
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;

	for(;;) {
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL)
			break;
		bp->bio_to->geom->start(bp);
	}
}

/*
 * Drain the up-path queue: for each completed bio, drop the consumer's
 * biocount reference (taken in g_io_request()) and call biodone() to
 * notify the waiter.  Returns when the queue is empty.
 */
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;
	struct g_consumer *cp;

	for(;;) {
		bp = g_bioq_first(&g_bio_run_up);
		if (bp == NULL)
			break;

		cp = bp->bio_from;

		atomic_add_int(&cp->biocount, -1);
		biodone(bp);
	}
}

/*
 * Synchronous read convenience routine: allocate a buffer of 'length'
 * bytes (M_WAITOK), issue a BIO_READ and wait for it.  On success the
 * caller owns the returned buffer and must g_free() it; on failure the
 * buffer is freed here and NULL is returned, with the error code stored
 * through 'error' if non-NULL.  EBUSY completions are retried after a
 * one-second (hz) sleep, with a fresh buffer each iteration.
 *
 * NOTE(review): g_new_bio() may return NULL (M_NOWAIT) and is
 * dereferenced unchecked here as well.
 */
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	do {
		bp = g_new_bio();
		bp->bio_cmd = BIO_READ;
		bp->bio_done = NULL;
		bp->bio_offset = offset;
		bp->bio_length = length;
		ptr = g_malloc(length, M_WAITOK);
		bp->bio_data = ptr;
		g_io_request(bp, cp);
		errorc = biowait(bp, "gread");
		if (error != NULL)
			*error = errorc;
		g_destroy_bio(bp);
		if (errorc) {
			g_free(ptr);
			ptr = NULL;
		}
		if (errorc == EBUSY)
			tsleep(&errorc, 0, "g_read_data_busy", hz);
	} while (errorc == EBUSY);
	return (ptr);
}

/*
 * Synchronous write convenience routine: issue a BIO_WRITE of 'length'
 * bytes from 'ptr' at 'offset' and wait for completion.  The caller
 * retains ownership of the data buffer.  Returns the bio error code.
 * Unlike g_read_data()/g_io_getattr(), EBUSY is NOT retried here.
 *
 * NOTE(review): the g_new_bio() result is used without a NULL check.
 */
int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	bp = g_new_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}