rf_diskqueue.c revision 1.3
1/* $NetBSD: rf_diskqueue.c,v 1.3 1999/01/14 20:29:38 oster Exp $ */ 2/* 3 * Copyright (c) 1995 Carnegie-Mellon University. 4 * All rights reserved. 5 * 6 * Author: Mark Holland 7 * 8 * Permission to use, copy, modify and distribute this software and 9 * its documentation is hereby granted, provided that both the copyright 10 * notice and this permission notice appear in all copies of the 11 * software, derivative works or modified versions, and any portions 12 * thereof, and that both notices appear in supporting documentation. 13 * 14 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 15 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 16 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 17 * 18 * Carnegie Mellon requests users of this software to return to 19 * 20 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 21 * School of Computer Science 22 * Carnegie Mellon University 23 * Pittsburgh PA 15213-3890 24 * 25 * any improvements or extensions that they make and grant Carnegie the 26 * rights to redistribute these changes. 27 */ 28 29/**************************************************************************************** 30 * 31 * rf_diskqueue.c -- higher-level disk queue code 32 * 33 * the routines here are a generic wrapper around the actual queueing 34 * routines. The code here implements thread scheduling, synchronization, 35 * and locking ops (see below) on top of the lower-level queueing code. 36 * 37 * to support atomic RMW, we implement "locking operations". When a locking op 38 * is dispatched to the lower levels of the driver, the queue is locked, and no further 39 * I/Os are dispatched until the queue receives & completes a corresponding "unlocking 40 * operation". This code relies on the higher layers to guarantee that a locking 41 * op will always be eventually followed by an unlocking op. The model is that 42 * the higher layers are structured so locking and unlocking ops occur in pairs, i.e. 43 * an unlocking op cannot be generated until after a locking op reports completion. 44 * There is no good way to check to see that an unlocking op "corresponds" to the 45 * op that currently has the queue locked, so we make no such attempt. Since by 46 * definition there can be only one locking op outstanding on a disk, this should 47 * not be a problem. 48 * 49 * In the kernel, we allow multiple I/Os to be concurrently dispatched to the disk 50 * driver. In order to support locking ops in this environment, when we decide to 51 * do a locking op, we stop dispatching new I/Os and wait until all dispatched I/Os 52 * have completed before dispatching the locking op. 53 * 54 * Unfortunately, the code is different in the 3 different operating states 55 * (user level, kernel, simulator). In the kernel, I/O is non-blocking, and 56 * we have no disk threads to dispatch for us. Therefore, we have to dispatch 57 * new I/Os to the scsi driver at the time of enqueue, and also at the time 58 * of completion. At user level, I/O is blocking, and so only the disk threads 59 * may dispatch I/Os. Thus at user level, all we can do at enqueue time is 60 * enqueue and wake up the disk thread to do the dispatch. 61 * 62 ***************************************************************************************/ 63 64/* 65 * : 66 * 67 * Log: rf_diskqueue.c,v 68 * Revision 1.50 1996/08/07 21:08:38 jimz 69 * b_proc -> kb_proc 70 * 71 * Revision 1.49 1996/07/05 20:36:14 jimz 72 * make rf_ConfigureDiskQueueSystem return 0 73 * 74 * Revision 1.48 1996/06/18 20:53:11 jimz 75 * fix up disk queueing (remove configure routine, 76 * add shutdown list arg to create routines) 77 * 78 * Revision 1.47 1996/06/14 14:16:36 jimz 79 * fix handling of bogus queue type 80 * 81 * Revision 1.46 1996/06/13 20:41:44 jimz 82 * add scan, cscan, random queueing 83 * 84 * Revision 1.45 1996/06/11 01:27:50 jimz 85 * Fixed bug where diskthread shutdown would crash or hang. This 86 * turned out to be two distinct bugs: 87 * (1) [crash] The thread shutdown code wasn't properly waiting for 88 * all the diskthreads to complete. This caused diskthreads that were 89 * exiting+cleaning up to unlock a destroyed mutex. 90 * (2) [hang] TerminateDiskQueues wasn't locking, and DiskIODequeue 91 * only checked for termination _after_ a wakeup if the queues were 92 * empty. This was a race where the termination wakeup could be lost 93 * by the dequeueing thread, and the system would hang waiting for the 94 * thread to exit, while the thread waited for an I/O or a signal to 95 * check the termination flag. 96 * 97 * Revision 1.44 1996/06/10 11:55:47 jimz 98 * Straightened out some per-array/not-per-array distinctions, fixed 99 * a couple bugs related to confusion. Added shutdown lists. Removed 100 * layout shutdown function (now subsumed by shutdown lists). 101 * 102 * Revision 1.43 1996/06/09 02:36:46 jimz 103 * lots of little crufty cleanup- fixup whitespace 104 * issues, comment #ifdefs, improve typing in some 105 * places (esp size-related) 106 * 107 * Revision 1.42 1996/06/07 22:26:27 jimz 108 * type-ify which_ru (RF_ReconUnitNum_t) 109 * 110 * Revision 1.41 1996/06/07 21:33:04 jimz 111 * begin using consistent types for sector numbers, 112 * stripe numbers, row+col numbers, recon unit numbers 113 * 114 * Revision 1.40 1996/06/06 17:28:04 jimz 115 * track sector number of last I/O dequeued 116 * 117 * Revision 1.39 1996/06/06 01:14:13 jimz 118 * fix crashing bug when tracerec is NULL (ie, from copyback) 119 * initialize req->queue 120 * 121 * Revision 1.38 1996/06/05 19:38:32 jimz 122 * fixed up disk queueing types config 123 * added sstf disk queueing 124 * fixed exit bug on diskthreads (ref-ing bad mem) 125 * 126 * Revision 1.37 1996/06/05 18:06:02 jimz 127 * Major code cleanup. The Great Renaming is now done. 128 * Better modularity. Better typing. Fixed a bunch of 129 * synchronization bugs. Made a lot of global stuff 130 * per-desc or per-array. Removed dead code. 131 * 132 * Revision 1.36 1996/05/30 23:22:16 jimz 133 * bugfixes of serialization, timing problems 134 * more cleanup 135 * 136 * Revision 1.35 1996/05/30 12:59:18 jimz 137 * make etimer happier, more portable 138 * 139 * Revision 1.34 1996/05/30 11:29:41 jimz 140 * Numerous bug fixes. Stripe lock release code disagreed with the taking code 141 * about when stripes should be locked (I made it consistent: no parity, no lock) 142 * There was a lot of extra serialization of I/Os which I've removed- a lot of 143 * it was to calculate values for the cache code, which is no longer with us. 144 * More types, function, macro cleanup. Added code to properly quiesce the array 145 * on shutdown. Made a lot of stuff array-specific which was (bogusly) general 146 * before. Fixed memory allocation, freeing bugs. 147 * 148 * Revision 1.33 1996/05/27 18:56:37 jimz 149 * more code cleanup 150 * better typing 151 * compiles in all 3 environments 152 * 153 * Revision 1.32 1996/05/24 22:17:04 jimz 154 * continue code + namespace cleanup 155 * typed a bunch of flags 156 * 157 * Revision 1.31 1996/05/24 01:59:45 jimz 158 * another checkpoint in code cleanup for release 159 * time to sync kernel tree 160 * 161 * Revision 1.30 1996/05/23 21:46:35 jimz 162 * checkpoint in code cleanup (release prep) 163 * lots of types, function names have been fixed 164 * 165 * Revision 1.29 1996/05/23 00:33:23 jimz 166 * code cleanup: move all debug decls to rf_options.c, all extern 167 * debug decls to rf_options.h, all debug vars preceded by rf_ 168 * 169 * Revision 1.28 1996/05/20 16:14:29 jimz 170 * switch to rf_{mutex,cond}_{init,destroy} 171 * 172 * Revision 1.27 1996/05/18 19:51:34 jimz 173 * major code cleanup- fix syntax, make some types consistent, 174 * add prototypes, clean out dead code, et cetera 175 * 176 * Revision 1.26 1996/05/16 19:21:49 wvcii 177 * fixed typo in init_dqd 178 * 179 * Revision 1.25 1996/05/16 16:02:51 jimz 180 * switch to RF_FREELIST stuff for DiskQueueData 181 * 182 * Revision 1.24 1996/05/10 16:24:14 jimz 183 * new cvscan function names 184 * 185 * Revision 1.23 1996/05/01 16:27:54 jimz 186 * don't use ccmn bp management 187 * 188 * Revision 1.22 1995/12/12 18:10:06 jimz 189 * MIN -> RF_MIN, MAX -> RF_MAX, ASSERT -> RF_ASSERT 190 * fix 80-column brain damage in comments 191 * 192 * Revision 1.21 1995/12/01 15:59:59 root 193 * added copyright info 194 * 195 * Revision 1.20 1995/11/07 16:27:20 wvcii 196 * added Peek() function to diskqueuesw 197 * non-locking accesses are never blocked (assume clients enforce proper 198 * respect for lock acquisition) 199 * 200 * Revision 1.19 1995/10/05 18:56:52 jimz 201 * fix req handling in IOComplete 202 * 203 * Revision 1.18 1995/10/04 20:13:50 wvcii 204 * added asserts to monitor numOutstanding queueLength 205 * 206 * Revision 1.17 1995/10/04 07:43:52 wvcii 207 * queue->numOutstanding now valid for user & sim 208 * added queue->queueLength 209 * user tested & verified, sim untested 210 * 211 * Revision 1.16 1995/09/12 00:21:19 wvcii 212 * added support for tracing disk queue time 213 * 214 */ 215 216#include "rf_types.h" 217#include "rf_threadstuff.h" 218#include "rf_threadid.h" 219#include "rf_raid.h" 220#include "rf_diskqueue.h" 221#include "rf_alloclist.h" 222#include "rf_acctrace.h" 223#include "rf_etimer.h" 224#include "rf_configure.h" 225#include "rf_general.h" 226#include "rf_freelist.h" 227#include "rf_debugprint.h" 228#include "rf_shutdown.h" 229#include "rf_cvscan.h" 230#include "rf_sstf.h" 231#include "rf_fifo.h" 232 233#ifdef SIMULATE 234#include "rf_diskevent.h" 235#endif /* SIMULATE */ 236 237#if !defined(__NetBSD__) 238extern struct buf *ubc_bufget(); 239#endif 240 241static int init_dqd(RF_DiskQueueData_t *); 242static void clean_dqd(RF_DiskQueueData_t *); 243static void rf_ShutdownDiskQueueSystem(void *); 244/* From rf_kintf.c */ 245int rf_DispatchKernelIO(RF_DiskQueue_t *,RF_DiskQueueData_t *); 246 247 248#define Dprintf1(s,a) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),NULL,NULL,NULL,NULL,NULL,NULL,NULL) 249#define Dprintf2(s,a,b) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),NULL,NULL,NULL,NULL,NULL,NULL) 250#define Dprintf3(s,a,b,c) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),NULL,NULL,NULL,NULL,NULL) 251#define Dprintf4(s,a,b,c,d) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),NULL,NULL,NULL,NULL) 252#define Dprintf5(s,a,b,c,d,e) if (rf_queueDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),NULL,NULL,NULL) 253 254#if !defined(KERNEL) && !defined(SIMULATE) 255 256/* queue must be locked before invoking this */ 257#define SIGNAL_DISK_QUEUE(_q_,_wh_) \ 258{ \ 259 if ( (_q_)->numWaiting > 0) { \ 260 (_q_)->numWaiting--; \ 261 RF_SIGNAL_COND( ((_q_)->cond) ); \ 262 } \ 263} 264 265/* queue must be locked before invoking this */ 266#define WAIT_DISK_QUEUE(_q_,_wh_) \ 267{ \ 268 (_q_)->numWaiting++; \ 269 RF_WAIT_COND( ((_q_)->cond), ((_q_)->mutex) ); \ 270} 271 272#else /* !defined(KERNEL) && !defined(SIMULATE) */ 273 274#define SIGNAL_DISK_QUEUE(_q_,_wh_) 275#define WAIT_DISK_QUEUE(_q_,_wh_) 276 277#endif /* !defined(KERNEL) && !defined(SIMULATE) */ 278 279/***************************************************************************************** 280 * 281 * the disk queue switch defines all the functions used in the different queueing 282 * disciplines 283 * queue ID, init routine, enqueue routine, dequeue routine 284 * 285 ****************************************************************************************/ 286 287static RF_DiskQueueSW_t diskqueuesw[] = { 288 {"fifo", /* FIFO */ 289 rf_FifoCreate, 290 rf_FifoEnqueue, 291 rf_FifoDequeue, 292 rf_FifoPeek, 293 rf_FifoPromote}, 294 295 {"cvscan", /* cvscan */ 296 rf_CvscanCreate, 297 rf_CvscanEnqueue, 298 rf_CvscanDequeue, 299 rf_CvscanPeek, 300 rf_CvscanPromote }, 301 302 {"sstf", /* shortest seek time first */ 303 rf_SstfCreate, 304 rf_SstfEnqueue, 305 rf_SstfDequeue, 306 rf_SstfPeek, 307 rf_SstfPromote}, 308 309 {"scan", /* SCAN (two-way elevator) */ 310 rf_ScanCreate, 311 rf_SstfEnqueue, 312 rf_ScanDequeue, 313 rf_ScanPeek, 314 rf_SstfPromote}, 315 316 {"cscan", /* CSCAN (one-way elevator) */ 317 rf_CscanCreate, 318 rf_SstfEnqueue, 319 rf_CscanDequeue, 320 rf_CscanPeek, 321 rf_SstfPromote}, 322 323#if !defined(KERNEL) && RF_INCLUDE_QUEUE_RANDOM > 0 324 /* to make a point to Chris :-> */ 325 {"random", /* random */ 326 rf_FifoCreate, 327 rf_FifoEnqueue, 328 rf_RandomDequeue, 329 rf_RandomPeek, 330 rf_FifoPromote}, 331#endif /* !KERNEL && RF_INCLUDE_QUEUE_RANDOM > 0 */ 332}; 333#define NUM_DISK_QUEUE_TYPES (sizeof(diskqueuesw)/sizeof(RF_DiskQueueSW_t)) 334 335static RF_FreeList_t *rf_dqd_freelist; 336 337#define RF_MAX_FREE_DQD 256 338#define RF_DQD_INC 16 339#define RF_DQD_INITIAL 64 340 341#ifdef __NetBSD__ 342#ifdef _KERNEL 343#include <sys/buf.h> 344#endif 345#endif 346 347static int init_dqd(dqd) 348 RF_DiskQueueData_t *dqd; 349{ 350#ifdef KERNEL 351#ifdef __NetBSD__ 352 /* XXX not sure if the following malloc is appropriate... probably not quite... */ 353 dqd->bp = (struct buf *) malloc( sizeof(struct buf), M_DEVBUF, M_NOWAIT); 354 /* XXX */ 355 /* printf("NEED TO IMPLEMENT THIS BETTER!\n"); */ 356#else 357 dqd->bp = ubc_bufget(); 358#endif 359 if (dqd->bp == NULL) { 360 return(ENOMEM); 361 } 362#ifdef __NetBSD__ 363 memset(dqd->bp,0,sizeof(struct buf)); /* if you don't do it, nobody else will.. */ 364#endif 365#endif /* KERNEL */ 366 return(0); 367} 368 369static void clean_dqd(dqd) 370 RF_DiskQueueData_t *dqd; 371{ 372#ifdef KERNEL 373#ifdef __NetBSD__ 374 /* printf("NEED TO IMPLEMENT THIS BETTER(2)!\n"); */ 375 /* XXX ? */ 376 free( dqd->bp, M_DEVBUF ); 377#else 378 ubc_buffree(dqd->bp); 379#endif 380 381#endif /* KERNEL */ 382} 383 384/* configures a single disk queue */ 385static int config_disk_queue( 386 RF_Raid_t *raidPtr, 387 RF_DiskQueue_t *diskqueue, 388 RF_RowCol_t r, /* row & col -- debug only. BZZT not any more... */ 389 RF_RowCol_t c, 390 RF_DiskQueueSW_t *p, 391 RF_SectorCount_t sectPerDisk, 392 dev_t dev, 393 int maxOutstanding, 394 RF_ShutdownList_t **listp, 395 RF_AllocListElem_t *clList) 396{ 397 int rc; 398 399 diskqueue->row = r; 400 diskqueue->col = c; 401 diskqueue->qPtr = p; 402 diskqueue->qHdr = (p->Create)(sectPerDisk, clList, listp); 403 diskqueue->dev = dev; 404 diskqueue->numOutstanding = 0; 405 diskqueue->queueLength = 0; 406 diskqueue->maxOutstanding = maxOutstanding; 407 diskqueue->curPriority = RF_IO_NORMAL_PRIORITY; 408 diskqueue->nextLockingOp = NULL; 409 diskqueue->unlockingOp = NULL; 410 diskqueue->numWaiting=0; 411 diskqueue->flags = 0; 412 diskqueue->raidPtr = raidPtr; 413#if defined(__NetBSD__) && defined(_KERNEL) 414 diskqueue->rf_cinfo = &raidPtr->raid_cinfo[r][c]; 415#endif 416 rc = rf_create_managed_mutex(listp, &diskqueue->mutex); 417 if (rc) { 418 RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__, 419 __LINE__, rc); 420 return(rc); 421 } 422 rc = rf_create_managed_cond(listp, &diskqueue->cond); 423 if (rc) { 424 RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n", __FILE__, 425 __LINE__, rc); 426 return(rc); 427 } 428 return(0); 429} 430 431static void rf_ShutdownDiskQueueSystem(ignored) 432 void *ignored; 433{ 434 RF_FREELIST_DESTROY_CLEAN(rf_dqd_freelist,next,(RF_DiskQueueData_t *),clean_dqd); 435} 436 437int rf_ConfigureDiskQueueSystem(listp) 438 RF_ShutdownList_t **listp; 439{ 440 int rc; 441 442 RF_FREELIST_CREATE(rf_dqd_freelist, RF_MAX_FREE_DQD, 443 RF_DQD_INC, sizeof(RF_DiskQueueData_t)); 444 if (rf_dqd_freelist == NULL) 445 return(ENOMEM); 446 rc = rf_ShutdownCreate(listp, rf_ShutdownDiskQueueSystem, NULL); 447 if (rc) { 448 RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n", 449 __FILE__, __LINE__, rc); 450 rf_ShutdownDiskQueueSystem(NULL); 451 return(rc); 452 } 453 RF_FREELIST_PRIME_INIT(rf_dqd_freelist, RF_DQD_INITIAL,next, 454 (RF_DiskQueueData_t *),init_dqd); 455 return(0); 456} 457 458#ifndef KERNEL 459/* this is called prior to shutdown to wakeup everyone waiting on a disk queue 460 * and tell them to exit 461 */ 462void rf_TerminateDiskQueues(raidPtr) 463 RF_Raid_t *raidPtr; 464{ 465 RF_RowCol_t r, c; 466 467 raidPtr->terminate_disk_queues = 1; 468 for (r=0; r<raidPtr->numRow; r++) { 469 for (c=0; c<raidPtr->numCol + ((r==0) ? raidPtr->numSpare : 0); c++) { 470 RF_LOCK_QUEUE_MUTEX(&raidPtr->Queues[r][c], "TerminateDiskQueues"); 471 RF_BROADCAST_COND(raidPtr->Queues[r][c].cond); 472 RF_UNLOCK_QUEUE_MUTEX(&raidPtr->Queues[r][c], "TerminateDiskQueues"); 473 } 474 } 475} 476#endif /* !KERNEL */ 477 478int rf_ConfigureDiskQueues( 479 RF_ShutdownList_t **listp, 480 RF_Raid_t *raidPtr, 481 RF_Config_t *cfgPtr) 482{ 483 RF_DiskQueue_t **diskQueues, *spareQueues; 484 RF_DiskQueueSW_t *p; 485 RF_RowCol_t r, c; 486 int rc, i; 487 488 raidPtr->maxQueueDepth = cfgPtr->maxOutstandingDiskReqs; 489 490 for(p=NULL,i=0;i<NUM_DISK_QUEUE_TYPES;i++) { 491 if (!strcmp(diskqueuesw[i].queueType, cfgPtr->diskQueueType)) { 492 p = &diskqueuesw[i]; 493 break; 494 } 495 } 496 if (p == NULL) { 497 RF_ERRORMSG2("Unknown queue type \"%s\". Using %s\n",cfgPtr->diskQueueType, diskqueuesw[0].queueType); 498 p = &diskqueuesw[0]; 499 } 500 501 RF_CallocAndAdd(diskQueues, raidPtr->numRow, sizeof(RF_DiskQueue_t *), (RF_DiskQueue_t **), raidPtr->cleanupList); 502 if (diskQueues == NULL) { 503 return(ENOMEM); 504 } 505 raidPtr->Queues = diskQueues; 506 for (r=0; r<raidPtr->numRow; r++) { 507 RF_CallocAndAdd(diskQueues[r], raidPtr->numCol + ((r==0) ? raidPtr->numSpare : 0), sizeof(RF_DiskQueue_t), (RF_DiskQueue_t *), raidPtr->cleanupList); 508 if (diskQueues[r] == NULL) 509 return(ENOMEM); 510 for (c=0; c<raidPtr->numCol; c++) { 511 rc = config_disk_queue(raidPtr, &diskQueues[r][c], r, c, p, 512 raidPtr->sectorsPerDisk, raidPtr->Disks[r][c].dev, 513 cfgPtr->maxOutstandingDiskReqs, listp, raidPtr->cleanupList); 514 if (rc) 515 return(rc); 516 } 517 } 518 519 spareQueues = &raidPtr->Queues[0][raidPtr->numCol]; 520 for (r=0; r<raidPtr->numSpare; r++) { 521 rc = config_disk_queue(raidPtr, &spareQueues[r], 522 0, raidPtr->numCol+r, p, 523 raidPtr->sectorsPerDisk, 524 raidPtr->Disks[0][raidPtr->numCol+r].dev, 525 cfgPtr->maxOutstandingDiskReqs, listp, 526 raidPtr->cleanupList); 527 if (rc) 528 return(rc); 529 } 530 return(0); 531} 532 533/* Enqueue a disk I/O 534 * 535 * Unfortunately, we have to do things differently in the different 536 * environments (simulator, user-level, kernel). 537 * At user level, all I/O is blocking, so we have 1 or more threads/disk 538 * and the thread that enqueues is different from the thread that dequeues. 539 * In the kernel, I/O is non-blocking and so we'd like to have multiple 540 * I/Os outstanding on the physical disks when possible. 541 * 542 * when any request arrives at a queue, we have two choices: 543 * dispatch it to the lower levels 544 * queue it up 545 * 546 * kernel rules for when to do what: 547 * locking request: queue empty => dispatch and lock queue, 548 * else queue it 549 * unlocking req : always dispatch it 550 * normal req : queue empty => dispatch it & set priority 551 * queue not full & priority is ok => dispatch it 552 * else queue it 553 * 554 * user-level rules: 555 * always enqueue. In the special case of an unlocking op, enqueue 556 * in a special way that will cause the unlocking op to be the next 557 * thing dequeued. 558 * 559 * simulator rules: 560 * Do the same as at user level, with the sleeps and wakeups suppressed. 561 */ 562void rf_DiskIOEnqueue(queue, req, pri) 563 RF_DiskQueue_t *queue; 564 RF_DiskQueueData_t *req; 565 int pri; 566{ 567 int tid; 568 569 RF_ETIMER_START(req->qtime); 570 rf_get_threadid(tid); 571 RF_ASSERT(req->type == RF_IO_TYPE_NOP || req->numSector); 572 req->priority = pri; 573 574 if (rf_queueDebug && (req->numSector == 0)) { 575 printf("Warning: Enqueueing zero-sector access\n"); 576 } 577 578#ifdef KERNEL 579 /* 580 * kernel 581 */ 582 RF_LOCK_QUEUE_MUTEX( queue, "DiskIOEnqueue" ); 583 /* locking request */ 584 if (RF_LOCKING_REQ(req)) { 585 if (RF_QUEUE_EMPTY(queue)) { 586 Dprintf3("Dispatching pri %d locking op to r %d c %d (queue empty)\n",pri,queue->row, queue->col); 587 RF_LOCK_QUEUE(queue); 588 rf_DispatchKernelIO(queue, req); 589 } else { 590 queue->queueLength++; /* increment count of number of requests waiting in this queue */ 591 Dprintf3("Enqueueing pri %d locking op to r %d c %d (queue not empty)\n",pri,queue->row, queue->col); 592 req->queue = (void *)queue; 593 (queue->qPtr->Enqueue)(queue->qHdr, req, pri); 594 } 595 } 596 /* unlocking request */ 597 else if (RF_UNLOCKING_REQ(req)) { /* we'll do the actual unlock when this I/O completes */ 598 Dprintf3("Dispatching pri %d unlocking op to r %d c %d\n",pri,queue->row, queue->col); 599 RF_ASSERT(RF_QUEUE_LOCKED(queue)); 600 rf_DispatchKernelIO(queue, req); 601 } 602 /* normal request */ 603 else if (RF_OK_TO_DISPATCH(queue, req)) { 604 Dprintf3("Dispatching pri %d regular op to r %d c %d (ok to dispatch)\n",pri,queue->row, queue->col); 605 rf_DispatchKernelIO(queue, req); 606 } else { 607 queue->queueLength++; /* increment count of number of requests waiting in this queue */ 608 Dprintf3("Enqueueing pri %d regular op to r %d c %d (not ok to dispatch)\n",pri,queue->row, queue->col); 609 req->queue = (void *)queue; 610 (queue->qPtr->Enqueue)(queue->qHdr, req, pri); 611 } 612 RF_UNLOCK_QUEUE_MUTEX( queue, "DiskIOEnqueue" ); 613 614#else /* KERNEL */ 615 /* 616 * user-level 617 */ 618 RF_LOCK_QUEUE_MUTEX( queue, "DiskIOEnqueue" ); 619 queue->queueLength++; /* increment count of number of requests waiting in this queue */ 620 /* unlocking request */ 621 if (RF_UNLOCKING_REQ(req)) { 622 Dprintf4("[%d] enqueueing pri %d unlocking op & signalling r %d c %d\n", tid, pri, queue->row, queue->col); 623 RF_ASSERT(RF_QUEUE_LOCKED(queue) && queue->unlockingOp == NULL); 624 queue->unlockingOp = req; 625 } 626 /* locking and normal requests */ 627 else { 628 req->queue = (void *)queue; 629 Dprintf5("[%d] enqueueing pri %d %s op & signalling r %d c %d\n", tid, pri, 630 (RF_LOCKING_REQ(req)) ? "locking" : "regular",queue->row,queue->col); 631 (queue->qPtr->Enqueue)(queue->qHdr, req, pri); 632 } 633 SIGNAL_DISK_QUEUE( queue, "DiskIOEnqueue"); 634 RF_UNLOCK_QUEUE_MUTEX( queue, "DiskIOEnqueue" ); 635#endif /* KERNEL */ 636} 637 638#if !defined(KERNEL) && !defined(SIMULATE) 639/* user-level only: tell all threads to wake up & recheck the queue */ 640void rf_BroadcastOnQueue(queue) 641 RF_DiskQueue_t *queue; 642{ 643 int i; 644 645 if (queue->maxOutstanding > 1) for (i=0; i<queue->maxOutstanding; i++) { 646 SIGNAL_DISK_QUEUE(queue, "BroadcastOnQueue" ); 647 } 648} 649#endif /* !KERNEL && !SIMULATE */ 650 651#ifndef KERNEL /* not used in kernel */ 652 653RF_DiskQueueData_t *rf_DiskIODequeue(queue) 654 RF_DiskQueue_t *queue; 655{ 656 RF_DiskQueueData_t *p, *headItem; 657 int tid; 658 659 rf_get_threadid(tid); 660 RF_LOCK_QUEUE_MUTEX( queue, "DiskIODequeue" ); 661 for (p=NULL; !p; ) { 662 if (queue->unlockingOp) { 663 /* unlocking request */ 664 RF_ASSERT(RF_QUEUE_LOCKED(queue)); 665 p = queue->unlockingOp; 666 queue->unlockingOp = NULL; 667 Dprintf4("[%d] dequeueing pri %d unlocking op r %d c %d\n", tid, p->priority, queue->row,queue->col); 668 } 669 else { 670 headItem = (queue->qPtr->Peek)(queue->qHdr); 671 if (headItem) { 672 if (RF_LOCKING_REQ(headItem)) { 673 /* locking request */ 674 if (!RF_QUEUE_LOCKED(queue)) { 675 /* queue isn't locked, so dequeue the request & lock the queue */ 676 p = (queue->qPtr->Dequeue)( queue->qHdr ); 677 if (p) 678 Dprintf4("[%d] dequeueing pri %d locking op r %d c %d\n", tid, p->priority, queue->row, queue->col); 679 else 680 Dprintf3("[%d] no dequeue -- raw queue empty r %d c %d\n", tid, queue->row, queue->col); 681 } 682 else { 683 /* queue already locked, no dequeue occurs */ 684 Dprintf3("[%d] no dequeue -- queue is locked r %d c %d\n", tid, queue->row, queue->col); 685 p = NULL; 686 } 687 } 688 else { 689 /* normal request, always dequeue and assume caller already has lock (if needed) */ 690 p = (queue->qPtr->Dequeue)( queue->qHdr ); 691 if (p) 692 Dprintf4("[%d] dequeueing pri %d regular op r %d c %d\n", tid, p->priority, queue->row, queue->col); 693 else 694 Dprintf3("[%d] no dequeue -- raw queue empty r %d c %d\n", tid, queue->row, queue->col); 695 } 696 } 697 else { 698 Dprintf3("[%d] no dequeue -- raw queue empty r %d c %d\n", tid, queue->row, queue->col); 699 } 700 } 701 702 if (queue->raidPtr->terminate_disk_queues) { 703 p = NULL; 704 break; 705 } 706#ifdef SIMULATE 707 break; /* in simulator, return NULL on empty queue instead of blocking */ 708#else /* SIMULATE */ 709 if (!p) { 710 Dprintf3("[%d] nothing to dequeue: waiting r %d c %d\n", tid, queue->row, queue->col); 711 WAIT_DISK_QUEUE( queue, "DiskIODequeue" ); 712 } 713#endif /* SIMULATE */ 714 } 715 716 if (p) { 717 queue->queueLength--; /* decrement count of number of requests waiting in this queue */ 718 RF_ASSERT(queue->queueLength >= 0); 719 queue->numOutstanding++; 720 queue->last_deq_sector = p->sectorOffset; 721 /* record the amount of time this request spent in the disk queue */ 722 RF_ETIMER_STOP(p->qtime); 723 RF_ETIMER_EVAL(p->qtime); 724 if (p->tracerec) 725 p->tracerec->diskqueue_us += RF_ETIMER_VAL_US(p->qtime); 726 } 727 728 if (p && RF_LOCKING_REQ(p)) { 729 RF_ASSERT(!RF_QUEUE_LOCKED(queue)); 730 Dprintf3("[%d] locking queue r %d c %d\n",tid,queue->row,queue->col); 731 RF_LOCK_QUEUE(queue); 732 } 733 RF_UNLOCK_QUEUE_MUTEX( queue, "DiskIODequeue" ); 734 735 return(p); 736} 737 738#else /* !KERNEL */ 739 740/* get the next set of I/Os started, kernel version only */ 741void rf_DiskIOComplete(queue, req, status) 742 RF_DiskQueue_t *queue; 743 RF_DiskQueueData_t *req; 744 int status; 745{ 746 int done=0; 747 748 RF_LOCK_QUEUE_MUTEX( queue, "DiskIOComplete" ); 749 750 /* unlock the queue: 751 (1) after an unlocking req completes 752 (2) after a locking req fails 753 */ 754 if (RF_UNLOCKING_REQ(req) || (RF_LOCKING_REQ(req) && status)) { 755 Dprintf2("DiskIOComplete: unlocking queue at r %d c %d\n", queue->row, queue->col); 756 RF_ASSERT(RF_QUEUE_LOCKED(queue) && (queue->unlockingOp == NULL)); 757 RF_UNLOCK_QUEUE(queue); 758 } 759 760 queue->numOutstanding--; 761 RF_ASSERT(queue->numOutstanding >= 0); 762 763 /* dispatch requests to the disk until we find one that we can't. */ 764 /* no reason to continue once we've filled up the queue */ 765 /* no reason to even start if the queue is locked */ 766 767 while (!done && !RF_QUEUE_FULL(queue) && !RF_QUEUE_LOCKED(queue)) { 768 if (queue->nextLockingOp) { 769 req = queue->nextLockingOp; queue->nextLockingOp = NULL; 770 Dprintf3("DiskIOComplete: a pri %d locking req was pending at r %d c %d\n",req->priority,queue->row, queue->col); 771 } else { 772 req = (queue->qPtr->Dequeue)( queue->qHdr ); 773 if (req != NULL) { 774 Dprintf3("DiskIOComplete: extracting pri %d req from queue at r %d c %d\n",req->priority,queue->row, queue->col); 775 } else { 776 Dprintf1("DiskIOComplete: no more requests to extract.\n",""); 777 } 778 } 779 if (req) { 780 queue->queueLength--; /* decrement count of number of requests waiting in this queue */ 781 RF_ASSERT(queue->queueLength >= 0); 782 } 783 if (!req) done=1; 784 else if (RF_LOCKING_REQ(req)) { 785 if (RF_QUEUE_EMPTY(queue)) { /* dispatch it */ 786 Dprintf3("DiskIOComplete: dispatching pri %d locking req to r %d c %d (queue empty)\n",req->priority,queue->row, queue->col); 787 RF_LOCK_QUEUE(queue); 788 rf_DispatchKernelIO(queue, req); 789 done = 1; 790 } else { /* put it aside to wait for the queue to drain */ 791 Dprintf3("DiskIOComplete: postponing pri %d locking req to r %d c %d\n",req->priority,queue->row, queue->col); 792 RF_ASSERT(queue->nextLockingOp == NULL); 793 queue->nextLockingOp = req; 794 done = 1; 795 } 796 } else if (RF_UNLOCKING_REQ(req)) { /* should not happen: unlocking ops should not get queued */ 797 RF_ASSERT(RF_QUEUE_LOCKED(queue)); /* support it anyway for the future */ 798 Dprintf3("DiskIOComplete: dispatching pri %d unl req to r %d c %d (SHOULD NOT SEE THIS)\n",req->priority,queue->row, queue->col); 799 rf_DispatchKernelIO(queue, req); 800 done = 1; 801 } else if (RF_OK_TO_DISPATCH(queue, req)) { 802 Dprintf3("DiskIOComplete: dispatching pri %d regular req to r %d c %d (ok to dispatch)\n",req->priority,queue->row, queue->col); 803 rf_DispatchKernelIO(queue, req); 804 } else { /* we can't dispatch it, so just re-enqueue it. */ 805 /* potential trouble here if disk queues batch reqs */ 806 Dprintf3("DiskIOComplete: re-enqueueing pri %d regular req to r %d c %d\n",req->priority,queue->row, queue->col); 807 queue->queueLength++; 808 (queue->qPtr->Enqueue)(queue->qHdr, req, req->priority); 809 done = 1; 810 } 811 } 812 813 RF_UNLOCK_QUEUE_MUTEX( queue, "DiskIOComplete" ); 814} 815#endif /* !KERNEL */ 816 817/* promotes accesses tagged with the given parityStripeID from low priority 818 * to normal priority. This promotion is optional, meaning that a queue 819 * need not implement it. If there is no promotion routine associated with 820 * a queue, this routine does nothing and returns -1. 821 */ 822int rf_DiskIOPromote(queue, parityStripeID, which_ru) 823 RF_DiskQueue_t *queue; 824 RF_StripeNum_t parityStripeID; 825 RF_ReconUnitNum_t which_ru; 826{ 827 int retval; 828 829 if (!queue->qPtr->Promote) 830 return(-1); 831 RF_LOCK_QUEUE_MUTEX( queue, "DiskIOPromote" ); 832 retval = (queue->qPtr->Promote)( queue->qHdr, parityStripeID, which_ru ); 833 RF_UNLOCK_QUEUE_MUTEX( queue, "DiskIOPromote" ); 834 return(retval); 835} 836 837RF_DiskQueueData_t *rf_CreateDiskQueueData( 838 RF_IoType_t typ, 839 RF_SectorNum_t ssect, 840 RF_SectorCount_t nsect, 841 caddr_t buf, 842 RF_StripeNum_t parityStripeID, 843 RF_ReconUnitNum_t which_ru, 844 int (*wakeF)(void *,int), 845 void *arg, 846 RF_DiskQueueData_t *next, 847 RF_AccTraceEntry_t *tracerec, 848 void *raidPtr, 849 RF_DiskQueueDataFlags_t flags, 850 void *kb_proc) 851{ 852 RF_DiskQueueData_t *p; 853 854 RF_FREELIST_GET_INIT(rf_dqd_freelist,p,next,(RF_DiskQueueData_t *),init_dqd); 855 856 p->sectorOffset = ssect + rf_protectedSectors; 857 p->numSector = nsect; 858 p->type = typ; 859 p->buf = buf; 860 p->parityStripeID= parityStripeID; 861 p->which_ru = which_ru; 862 p->CompleteFunc = wakeF; 863 p->argument = arg; 864 p->next = next; 865 p->tracerec = tracerec; 866 p->priority = RF_IO_NORMAL_PRIORITY; 867 p->AuxFunc = NULL; 868 p->buf2 = NULL; 869#ifdef SIMULATE 870 p->owner = rf_GetCurrentOwner(); 871#endif /* SIMULATE */ 872 p->raidPtr = raidPtr; 873 p->flags = flags; 874#ifdef KERNEL 875 p->b_proc = kb_proc; 876#endif /* KERNEL */ 877 return(p); 878} 879 880RF_DiskQueueData_t *rf_CreateDiskQueueDataFull( 881 RF_IoType_t typ, 882 RF_SectorNum_t ssect, 883 RF_SectorCount_t nsect, 884 caddr_t buf, 885 RF_StripeNum_t parityStripeID, 886 RF_ReconUnitNum_t which_ru, 887 int (*wakeF)(void *,int), 888 void *arg, 889 RF_DiskQueueData_t *next, 890 RF_AccTraceEntry_t *tracerec, 891 int priority, 892 int (*AuxFunc)(void *,...), 893 caddr_t buf2, 894 void *raidPtr, 895 RF_DiskQueueDataFlags_t flags, 896 void *kb_proc) 897{ 898 RF_DiskQueueData_t *p; 899 900 RF_FREELIST_GET_INIT(rf_dqd_freelist,p,next,(RF_DiskQueueData_t *),init_dqd); 901 902 p->sectorOffset = ssect + rf_protectedSectors; 903 p->numSector = nsect; 904 p->type = typ; 905 p->buf = buf; 906 p->parityStripeID= parityStripeID; 907 p->which_ru = which_ru; 908 p->CompleteFunc = wakeF; 909 p->argument = arg; 910 p->next = next; 911 p->tracerec = tracerec; 912 p->priority = priority; 913 p->AuxFunc = AuxFunc; 914 p->buf2 = buf2; 915#ifdef SIMULATE 916 p->owner = rf_GetCurrentOwner(); 917#endif /* SIMULATE */ 918 p->raidPtr = raidPtr; 919 p->flags = flags; 920#ifdef KERNEL 921 p->b_proc = kb_proc; 922#endif /* KERNEL */ 923 return(p); 924} 925 926void rf_FreeDiskQueueData(p) 927 RF_DiskQueueData_t *p; 928{ 929 RF_FREELIST_FREE_CLEAN(rf_dqd_freelist,p,next,clean_dqd); 930} 931