/* geom_io.c, FreeBSD revision 104665 */
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/geom/geom_io.c 104665 2002-10-08 07:03:58Z phk $
 */

#include <sys/param.h>
#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <signal.h>
#include <err.h>
#include <sched.h>
#else
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#endif

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

/*
 * The three bio queues of the GEOM I/O machinery:
 *   g_bio_run_down - requests headed from consumers towards providers,
 *                    drained by g_io_schedule_down().
 *   g_bio_run_up   - completed requests headed back up towards consumers,
 *                    drained by g_io_schedule_up().
 *   g_bio_idle     - recycled, zeroed bios available for reuse by
 *                    g_new_bio().
 */
static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_idle;

#include <machine/atomic.h>

/* Acquire the mutex protecting a bio queue. */
static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

/* Release the mutex protecting a bio queue. */
static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
/* Tear down a bio queue's mutex.  Currently unused. */
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

/* Initialize a bio queue: empty tail queue plus its mutex. */
static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

/*
 * Remove and return the bio at the head of the queue, or NULL if the
 * queue is empty.  The queue's length counter is kept in sync under
 * the queue mutex.
 */
static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	g_bioq_lock(bq);
	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	g_bioq_unlock(bq);
	return (bp);
}

/* Append a bio to the tail of a queue, updating the length counter. */
static void
g_bioq_enqueue_tail(struct bio *bp, struct g_bioq *rq)
{

	g_bioq_lock(rq);
	TAILQ_INSERT_TAIL(&rq->bio_queue, bp, bio_queue);
	rq->bio_queue_length++;
	g_bioq_unlock(rq);
}

/*
 * Allocate a bio, preferring a recycled one from the idle queue.
 * Falls back to g_malloc() with M_NOWAIT, so this can return NULL;
 * callers must be prepared for allocation failure.
 */
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = g_bioq_first(&g_bio_idle);
	if (bp == NULL)
		bp = g_malloc(sizeof *bp, M_NOWAIT | M_ZERO);
	g_trace(G_T_BIO, "g_new_bio() = %p", bp);
	return (bp);
}

/*
 * Release a bio: zero it (so reuse via g_new_bio() sees the same
 * M_ZERO state as a fresh allocation) and park it on the idle queue.
 * The memory is never returned to the allocator.
 */
void
g_destroy_bio(struct bio *bp)
{

	g_trace(G_T_BIO, "g_destroy_bio(%p)", bp);
	bzero(bp, sizeof *bp);
	g_bioq_enqueue_tail(bp, &g_bio_idle);
}
140 141struct bio * 142g_clone_bio(struct bio *bp) 143{ 144 struct bio *bp2; 145 146 bp2 = g_new_bio(); 147 if (bp2 != NULL) { 148 bp2->bio_linkage = bp; 149 bp2->bio_cmd = bp->bio_cmd; 150 bp2->bio_length = bp->bio_length; 151 bp2->bio_offset = bp->bio_offset; 152 bp2->bio_data = bp->bio_data; 153 bp2->bio_attribute = bp->bio_attribute; 154 } 155 g_trace(G_T_BIO, "g_clone_bio(%p) = %p", bp, bp2); 156 return(bp2); 157} 158 159void 160g_io_init() 161{ 162 163 g_bioq_init(&g_bio_run_down); 164 g_bioq_init(&g_bio_run_up); 165 g_bioq_init(&g_bio_idle); 166} 167 168int 169g_io_setattr(const char *attr, struct g_consumer *cp, int len, void *ptr) 170{ 171 struct bio *bp; 172 int error; 173 174 g_trace(G_T_BIO, "bio_setattr(%s)", attr); 175 bp = g_new_bio(); 176 bp->bio_cmd = BIO_SETATTR; 177 bp->bio_done = NULL; 178 bp->bio_attribute = attr; 179 bp->bio_length = len; 180 bp->bio_data = ptr; 181 g_io_request(bp, cp); 182 error = biowait(bp, "gsetattr"); 183 g_destroy_bio(bp); 184 return (error); 185} 186 187 188int 189g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr) 190{ 191 struct bio *bp; 192 int error; 193 194 g_trace(G_T_BIO, "bio_getattr(%s)", attr); 195 bp = g_new_bio(); 196 bp->bio_cmd = BIO_GETATTR; 197 bp->bio_done = NULL; 198 bp->bio_attribute = attr; 199 bp->bio_length = *len; 200 bp->bio_data = ptr; 201 g_io_request(bp, cp); 202 error = biowait(bp, "ggetattr"); 203 *len = bp->bio_completed; 204 g_destroy_bio(bp); 205 return (error); 206} 207 208void 209g_io_request(struct bio *bp, struct g_consumer *cp) 210{ 211 int error; 212 off_t excess; 213 214 KASSERT(cp != NULL, ("bio_request on thin air")); 215 error = 0; 216 bp->bio_from = cp; 217 bp->bio_to = cp->provider; 218 bp->bio_error = 0; 219 bp->bio_completed = 0; 220 221 /* begin_stats(&bp->stats); */ 222 223 atomic_add_int(&cp->biocount, 1); 224 /* Fail on unattached consumers */ 225 if (bp->bio_to == NULL) { 226 g_io_deliver(bp, ENXIO); 227 return; 228 } 229 /* Fail if access 
doesn't allow operation */ 230 switch(bp->bio_cmd) { 231 case BIO_READ: 232 case BIO_GETATTR: 233 if (cp->acr == 0) { 234 g_io_deliver(bp, EPERM); 235 return; 236 } 237 break; 238 case BIO_WRITE: 239 case BIO_DELETE: 240 if (cp->acw == 0) { 241 g_io_deliver(bp, EPERM); 242 return; 243 } 244 break; 245 case BIO_SETATTR: 246 /* XXX: Should ideally check for (cp->ace == 0) */ 247 if ((cp->acw == 0)) { 248#ifdef DIAGNOSTIC 249 printf("setattr on %s mode (%d,%d,%d)\n", 250 cp->provider->name, 251 cp->acr, cp->acw, cp->ace); 252#endif 253 g_io_deliver(bp, EPERM); 254 return; 255 } 256 break; 257 default: 258 g_io_deliver(bp, EPERM); 259 return; 260 } 261 /* if provider is marked for error, don't disturb. */ 262 if (bp->bio_to->error) { 263 g_io_deliver(bp, bp->bio_to->error); 264 return; 265 } 266 switch(bp->bio_cmd) { 267 case BIO_READ: 268 case BIO_WRITE: 269 case BIO_DELETE: 270 /* Reject requests past the end of media. */ 271 if (bp->bio_offset > bp->bio_to->mediasize) { 272 g_io_deliver(bp, EIO); 273 return; 274 } 275 /* Truncate requests to the end of providers media. */ 276 excess = bp->bio_offset + bp->bio_length; 277 if (excess > bp->bio_to->mediasize) { 278 excess -= bp->bio_to->mediasize; 279 bp->bio_length -= excess; 280 } 281 /* Deliver zero length transfers right here. */ 282 if (bp->bio_length == 0) { 283 g_io_deliver(bp, 0); 284 return; 285 } 286 break; 287 default: 288 break; 289 } 290 /* Pass it on down. 
*/ 291 g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d", 292 bp, bp->bio_from, bp->bio_from->geom->name, 293 bp->bio_to, bp->bio_to->name, bp->bio_cmd); 294 g_bioq_enqueue_tail(bp, &g_bio_run_down); 295 wakeup(&g_wait_down); 296} 297 298void 299g_io_deliver(struct bio *bp, int error) 300{ 301 302 g_trace(G_T_BIO, 303 "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d", 304 bp, bp->bio_from, bp->bio_from->geom->name, 305 bp->bio_to, bp->bio_to->name, bp->bio_cmd, error); 306 /* finish_stats(&bp->stats); */ 307 308 bp->bio_error = error; 309 310 g_bioq_enqueue_tail(bp, &g_bio_run_up); 311 312 wakeup(&g_wait_up); 313} 314 315void 316g_io_schedule_down(struct thread *tp __unused) 317{ 318 struct bio *bp; 319 320 for(;;) { 321 bp = g_bioq_first(&g_bio_run_down); 322 if (bp == NULL) 323 break; 324 bp->bio_to->geom->start(bp); 325 } 326} 327 328void 329g_io_schedule_up(struct thread *tp __unused) 330{ 331 struct bio *bp; 332 struct g_consumer *cp; 333 334 for(;;) { 335 bp = g_bioq_first(&g_bio_run_up); 336 if (bp == NULL) 337 break; 338 339 cp = bp->bio_from; 340 341 atomic_add_int(&cp->biocount, -1); 342 biodone(bp); 343 } 344} 345 346void * 347g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error) 348{ 349 struct bio *bp; 350 void *ptr; 351 int errorc; 352 353 bp = g_new_bio(); 354 bp->bio_cmd = BIO_READ; 355 bp->bio_done = NULL; 356 bp->bio_offset = offset; 357 bp->bio_length = length; 358 ptr = g_malloc(length, M_WAITOK); 359 bp->bio_data = ptr; 360 g_io_request(bp, cp); 361 errorc = biowait(bp, "gread"); 362 if (error != NULL) 363 *error = errorc; 364 g_destroy_bio(bp); 365 if (errorc) { 366 g_free(ptr); 367 ptr = NULL; 368 } 369 return (ptr); 370} 371 372int 373g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length) 374{ 375 struct bio *bp; 376 int error; 377 378 bp = g_new_bio(); 379 bp->bio_cmd = BIO_WRITE; 380 bp->bio_done = NULL; 381 bp->bio_offset = offset; 382 bp->bio_length = length; 383 bp->bio_data 
= ptr; 384 g_io_request(bp, cp); 385 error = biowait(bp, "gwrite"); 386 g_destroy_bio(bp); 387 return (error); 388} 389