/*
 * IDE I/O functions
 *
 * Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */


#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/bitops.h>

static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	int ret = 1;

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && end_io_error(uptodate))
		nr_sectors = rq->hard_nr_sectors;

	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		HWGROUP(drive)->hwif->ide_dma_on(drive);
	}

	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
		add_disk_randomness(rq->rq_disk);
		if (!list_empty(&rq->queuelist))
			blkdev_dequeue_request(rq);
		HWGROUP(drive)->rq = NULL;
		end_that_request_last(rq, uptodate);
		ret = 0;
	}

	return ret;
}

/**
 * ide_end_request - complete an IDE I/O
 * @drive: IDE device for the I/O
 * @uptodate: status of the completed I/O (non-zero means success)
 * @nr_sectors: number of sectors completed
 *
 * This is our end_request wrapper function. We complete the I/O,
 * update random number input and dequeue the request, which if
 * it was tagged may be out of order.
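 * If @nr_sectors is zero, the size of the current segment
 * (rq->hard_cur_sectors) is used instead.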
102 */ 103 104int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors) 105{ 106 struct request *rq; 107 unsigned long flags; 108 int ret = 1; 109 110 /* 111 * room for locking improvements here, the calls below don't 112 * need the queue lock held at all 113 */ 114 spin_lock_irqsave(&ide_lock, flags); 115 rq = HWGROUP(drive)->rq; 116 117 if (!nr_sectors) 118 nr_sectors = rq->hard_cur_sectors; 119 120 ret = __ide_end_request(drive, rq, uptodate, nr_sectors); 121 122 spin_unlock_irqrestore(&ide_lock, flags); 123 return ret; 124} 125EXPORT_SYMBOL(ide_end_request); 126 127/* 128 * Power Management state machine. This one is rather trivial for now, 129 * we should probably add more, like switching back to PIO on suspend 130 * to help some BIOSes, re-do the door locking on resume, etc... 131 */ 132 133enum { 134 ide_pm_flush_cache = ide_pm_state_start_suspend, 135 idedisk_pm_standby, 136 137 idedisk_pm_restore_pio = ide_pm_state_start_resume, 138 idedisk_pm_idle, 139 ide_pm_restore_dma, 140}; 141 142static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error) 143{ 144 struct request_pm_state *pm = rq->data; 145 146 if (drive->media != ide_disk) 147 return; 148 149 switch (pm->pm_step) { 150 case ide_pm_flush_cache: /* Suspend step 1 (flush cache) complete */ 151 if (pm->pm_state == PM_EVENT_FREEZE) 152 pm->pm_step = ide_pm_state_completed; 153 else 154 pm->pm_step = idedisk_pm_standby; 155 break; 156 case idedisk_pm_standby: /* Suspend step 2 (standby) complete */ 157 pm->pm_step = ide_pm_state_completed; 158 break; 159 case idedisk_pm_restore_pio: /* Resume step 1 complete */ 160 pm->pm_step = idedisk_pm_idle; 161 break; 162 case idedisk_pm_idle: /* Resume step 2 (idle) complete */ 163 pm->pm_step = ide_pm_restore_dma; 164 break; 165 } 166} 167 168static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq) 169{ 170 struct request_pm_state *pm = rq->data; 171 ide_task_t *args = rq->special; 172 173 memset(args, 0, sizeof(*args)); 174 175 switch (pm->pm_step) { 176 case ide_pm_flush_cache: /* Suspend step 1 (flush cache) */ 177 if (drive->media != ide_disk) 178 break; 179 /* Not supported? Switch to next step now. 
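		 * (ide_complete_power_step() advances pm->pm_step for us in that case)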
*/ 180 if (!drive->wcache || !ide_id_has_flush_cache(drive->id)) { 181 ide_complete_power_step(drive, rq, 0, 0); 182 return ide_stopped; 183 } 184 if (ide_id_has_flush_cache_ext(drive->id)) 185 args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE_EXT; 186 else 187 args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE; 188 args->command_type = IDE_DRIVE_TASK_NO_DATA; 189 args->handler = &task_no_data_intr; 190 return do_rw_taskfile(drive, args); 191 192 case idedisk_pm_standby: /* Suspend step 2 (standby) */ 193 args->tfRegister[IDE_COMMAND_OFFSET] = WIN_STANDBYNOW1; 194 args->command_type = IDE_DRIVE_TASK_NO_DATA; 195 args->handler = &task_no_data_intr; 196 return do_rw_taskfile(drive, args); 197 198 case idedisk_pm_restore_pio: /* Resume step 1 (restore PIO) */ 199 if (drive->hwif->tuneproc != NULL) 200 drive->hwif->tuneproc(drive, 255); 201 /* 202 * skip idedisk_pm_idle for ATAPI devices 203 */ 204 if (drive->media != ide_disk) 205 pm->pm_step = ide_pm_restore_dma; 206 else 207 ide_complete_power_step(drive, rq, 0, 0); 208 return ide_stopped; 209 210 case idedisk_pm_idle: /* Resume step 2 (idle) */ 211 args->tfRegister[IDE_COMMAND_OFFSET] = WIN_IDLEIMMEDIATE; 212 args->command_type = IDE_DRIVE_TASK_NO_DATA; 213 args->handler = task_no_data_intr; 214 return do_rw_taskfile(drive, args); 215 216 case ide_pm_restore_dma: /* Resume step 3 (restore DMA) */ 217 /* 218 * Right now, all we do is call hwif->ide_dma_check(drive), 219 * we could be smarter and check for current xfer_speed 220 * in struct drive etc... 221 */ 222 if ((drive->id->capability & 1) == 0) 223 break; 224 if (drive->hwif->ide_dma_check == NULL) 225 break; 226 drive->hwif->dma_off_quietly(drive); 227 ide_set_dma(drive); 228 break; 229 } 230 pm->pm_step = ide_pm_state_completed; 231 return ide_stopped; 232} 233 234/** 235 * ide_end_dequeued_request - complete an IDE I/O 236 * @drive: IDE device for the I/O 237 * @uptodate: 238 * @nr_sectors: number of sectors completed 239 * 240 * Complete an I/O that is no longer on the request queue. This 241 * typically occurs when we pull the request and issue a REQUEST_SENSE. 242 * We must still finish the old request but we must not tamper with the 243 * queue in the meantime. 244 * 245 * NOTE: This path does not handle barrier, but barrier is not supported 246 * on ide-cd anyway. 
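 * Tagged requests have their tag released with blk_queue_end_tag()
 * instead of going through the normal dequeue path.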
247 */ 248 249int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, 250 int uptodate, int nr_sectors) 251{ 252 unsigned long flags; 253 int ret = 1; 254 255 spin_lock_irqsave(&ide_lock, flags); 256 257 BUG_ON(!blk_rq_started(rq)); 258 259 /* 260 * if failfast is set on a request, override number of sectors and 261 * complete the whole request right now 262 */ 263 if (blk_noretry_request(rq) && end_io_error(uptodate)) 264 nr_sectors = rq->hard_nr_sectors; 265 266 if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors) 267 rq->errors = -EIO; 268 269 /* 270 * decide whether to reenable DMA -- 3 is a random magic for now, 271 * if we DMA timeout more than 3 times, just stay in PIO 272 */ 273 if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) { 274 drive->state = 0; 275 HWGROUP(drive)->hwif->ide_dma_on(drive); 276 } 277 278 if (!end_that_request_first(rq, uptodate, nr_sectors)) { 279 add_disk_randomness(rq->rq_disk); 280 if (blk_rq_tagged(rq)) 281 blk_queue_end_tag(drive->queue, rq); 282 end_that_request_last(rq, uptodate); 283 ret = 0; 284 } 285 spin_unlock_irqrestore(&ide_lock, flags); 286 return ret; 287} 288EXPORT_SYMBOL_GPL(ide_end_dequeued_request); 289 290 291/** 292 * ide_complete_pm_request - end the current Power Management request 293 * @drive: target drive 294 * @rq: request 295 * 296 * This function cleans up the current PM request and stops the queue 297 * if necessary. 298 */ 299static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq) 300{ 301 unsigned long flags; 302 303#ifdef DEBUG_PM 304 printk("%s: completing PM request, %s\n", drive->name, 305 blk_pm_suspend_request(rq) ? "suspend" : "resume"); 306#endif 307 spin_lock_irqsave(&ide_lock, flags); 308 if (blk_pm_suspend_request(rq)) { 309 blk_stop_queue(drive->queue); 310 } else { 311 drive->blocked = 0; 312 blk_start_queue(drive->queue); 313 } 314 blkdev_dequeue_request(rq); 315 HWGROUP(drive)->rq = NULL; 316 end_that_request_last(rq, 1); 317 spin_unlock_irqrestore(&ide_lock, flags); 318} 319 320u64 ide_get_error_location(ide_drive_t *drive, char *args) 321{ 322 u32 high, low; 323 u8 hcyl, lcyl, sect; 324 u64 sector; 325 326 high = 0; 327 hcyl = args[5]; 328 lcyl = args[4]; 329 sect = args[3]; 330 331 if (ide_id_has_flush_cache_ext(drive->id)) { 332 low = (hcyl << 16) | (lcyl << 8) | sect; 333 HWIF(drive)->OUTB(drive->ctl|0x80, IDE_CONTROL_REG); 334 high = ide_read_24(drive); 335 } else { 336 u8 cur = HWIF(drive)->INB(IDE_SELECT_REG); 337 if (cur & 0x40) { 338 high = cur & 0xf; 339 low = (hcyl << 16) | (lcyl << 8) | sect; 340 } else { 341 low = hcyl * drive->head * drive->sect; 342 low += lcyl * drive->sect; 343 low += sect - 1; 344 } 345 } 346 347 sector = ((u64) high << 24) | low; 348 return sector; 349} 350EXPORT_SYMBOL(ide_get_error_location); 351 352/** 353 * ide_end_drive_cmd - end an explicit drive command 354 * @drive: command 355 * @stat: status bits 356 * @err: error bits 357 * 358 * Clean up after success/failure of an explicit drive command. 359 * These get thrown onto the queue so they are synchronized with 360 * real I/O operations on the drive. 361 * 362 * In LBA48 mode we have to read the register set twice to get 363 * all the extra information out. 
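 * (the second pass is done with the HOB bit set in the device control
 * register so that the high-order taskfile bytes are returned)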
364 */ 365 366void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) 367{ 368 ide_hwif_t *hwif = HWIF(drive); 369 unsigned long flags; 370 struct request *rq; 371 372 spin_lock_irqsave(&ide_lock, flags); 373 rq = HWGROUP(drive)->rq; 374 spin_unlock_irqrestore(&ide_lock, flags); 375 376 if (rq->cmd_type == REQ_TYPE_ATA_CMD) { 377 u8 *args = (u8 *) rq->buffer; 378 if (rq->errors == 0) 379 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); 380 381 if (args) { 382 args[0] = stat; 383 args[1] = err; 384 args[2] = hwif->INB(IDE_NSECTOR_REG); 385 } 386 } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) { 387 u8 *args = (u8 *) rq->buffer; 388 if (rq->errors == 0) 389 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); 390 391 if (args) { 392 args[0] = stat; 393 args[1] = err; 394 args[2] = hwif->INB(IDE_NSECTOR_REG); 395 args[3] = hwif->INB(IDE_SECTOR_REG); 396 args[4] = hwif->INB(IDE_LCYL_REG); 397 args[5] = hwif->INB(IDE_HCYL_REG); 398 args[6] = hwif->INB(IDE_SELECT_REG); 399 } 400 } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 401 ide_task_t *args = (ide_task_t *) rq->special; 402 if (rq->errors == 0) 403 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); 404 405 if (args) { 406 if (args->tf_in_flags.b.data) { 407 u16 data = hwif->INW(IDE_DATA_REG); 408 args->tfRegister[IDE_DATA_OFFSET] = (data) & 0xFF; 409 args->hobRegister[IDE_DATA_OFFSET] = (data >> 8) & 0xFF; 410 } 411 args->tfRegister[IDE_ERROR_OFFSET] = err; 412 /* be sure we're looking at the low order bits */ 413 hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG); 414 args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG); 415 args->tfRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG); 416 args->tfRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG); 417 args->tfRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG); 418 args->tfRegister[IDE_SELECT_OFFSET] = hwif->INB(IDE_SELECT_REG); 419 args->tfRegister[IDE_STATUS_OFFSET] = stat; 420 421 if (drive->addressing == 1) { 422 hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG); 423 args->hobRegister[IDE_FEATURE_OFFSET] = hwif->INB(IDE_FEATURE_REG); 424 args->hobRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG); 425 args->hobRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG); 426 args->hobRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG); 427 args->hobRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG); 428 } 429 } 430 } else if (blk_pm_request(rq)) { 431 struct request_pm_state *pm = rq->data; 432#ifdef DEBUG_PM 433 printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n", 434 drive->name, rq->pm->pm_step, stat, err); 435#endif 436 ide_complete_power_step(drive, rq, stat, err); 437 if (pm->pm_step == ide_pm_state_completed) 438 ide_complete_pm_request(drive, rq); 439 return; 440 } 441 442 spin_lock_irqsave(&ide_lock, flags); 443 blkdev_dequeue_request(rq); 444 HWGROUP(drive)->rq = NULL; 445 rq->errors = err; 446 end_that_request_last(rq, !rq->errors); 447 spin_unlock_irqrestore(&ide_lock, flags); 448} 449 450EXPORT_SYMBOL(ide_end_drive_cmd); 451 452/** 453 * try_to_flush_leftover_data - flush junk 454 * @drive: drive to flush 455 * 456 * try_to_flush_leftover_data() is invoked in response to a drive 457 * unexpectedly having its DRQ_STAT bit set. As an alternative to 458 * resetting the drive, this routine tries to clear the condition 459 * by read a sector's worth of data from the drive. Of course, 460 * this may not help if the drive is *waiting* for data from *us*. 
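 * We only do this for disks, and we discard at most one multi-sector
 * block (drive->mult_count sectors, or a single sector) of data.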
461 */ 462static void try_to_flush_leftover_data (ide_drive_t *drive) 463{ 464 int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS; 465 466 if (drive->media != ide_disk) 467 return; 468 while (i > 0) { 469 u32 buffer[16]; 470 u32 wcount = (i > 16) ? 16 : i; 471 472 i -= wcount; 473 HWIF(drive)->ata_input_data(drive, buffer, wcount); 474 } 475} 476 477static void ide_kill_rq(ide_drive_t *drive, struct request *rq) 478{ 479 if (rq->rq_disk) { 480 ide_driver_t *drv; 481 482 drv = *(ide_driver_t **)rq->rq_disk->private_data; 483 drv->end_request(drive, 0, 0); 484 } else 485 ide_end_request(drive, 0, 0); 486} 487 488static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) 489{ 490 ide_hwif_t *hwif = drive->hwif; 491 492 if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) { 493 /* other bits are useless when BUSY */ 494 rq->errors |= ERROR_RESET; 495 } else if (stat & ERR_STAT) { 496 /* err has different meaning on cdrom and tape */ 497 if (err == ABRT_ERR) { 498 if (drive->select.b.lba && 499 /* some newer drives don't support WIN_SPECIFY */ 500 hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY) 501 return ide_stopped; 502 } else if ((err & BAD_CRC) == BAD_CRC) { 503 /* UDMA crc error, just retry the operation */ 504 drive->crc_count++; 505 } else if (err & (BBD_ERR | ECC_ERR)) { 506 /* retries won't help these */ 507 rq->errors = ERROR_MAX; 508 } else if (err & TRK0_ERR) { 509 /* help it find track zero */ 510 rq->errors |= ERROR_RECAL; 511 } 512 } 513 514 if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ && hwif->err_stops_fifo == 0) 515 try_to_flush_leftover_data(drive); 516 517 if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) { 518 ide_kill_rq(drive, rq); 519 return ide_stopped; 520 } 521 522 if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT)) 523 rq->errors |= ERROR_RESET; 524 525 if ((rq->errors & ERROR_RESET) == ERROR_RESET) { 526 ++rq->errors; 527 return ide_do_reset(drive); 528 } 529 530 if ((rq->errors & ERROR_RECAL) == ERROR_RECAL) 531 drive->special.b.recalibrate = 1; 532 533 ++rq->errors; 534 535 return ide_stopped; 536} 537 538static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) 539{ 540 ide_hwif_t *hwif = drive->hwif; 541 542 if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) { 543 /* other bits are useless when BUSY */ 544 rq->errors |= ERROR_RESET; 545 } else { 546 /* add decoding error stuff */ 547 } 548 549 if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT)) 550 /* force an abort */ 551 hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG); 552 553 if (rq->errors >= ERROR_MAX) { 554 ide_kill_rq(drive, rq); 555 } else { 556 if ((rq->errors & ERROR_RESET) == ERROR_RESET) { 557 ++rq->errors; 558 return ide_do_reset(drive); 559 } 560 ++rq->errors; 561 } 562 563 return ide_stopped; 564} 565 566ide_startstop_t 567__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) 568{ 569 if (drive->media == ide_disk) 570 return ide_ata_error(drive, rq, stat, err); 571 return ide_atapi_error(drive, rq, stat, err); 572} 573 574EXPORT_SYMBOL_GPL(__ide_error); 575 576/** 577 * ide_error - handle an error on the IDE 578 * @drive: drive the error occurred on 579 * @msg: message to report 580 * @stat: status bits 581 * 582 * ide_error() takes action based on the error returned by the drive. 583 * For normal I/O that may well include retries. We deal with 584 * both new-style (taskfile) and old style command handling here. 
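 * Special (non-filesystem) requests are not retried here: they are
 * completed at once via ide_end_drive_cmd() with the error recorded.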
585 * In the case of taskfile command handling there is work left to 586 * do 587 */ 588 589ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat) 590{ 591 struct request *rq; 592 u8 err; 593 594 err = ide_dump_status(drive, msg, stat); 595 596 if ((rq = HWGROUP(drive)->rq) == NULL) 597 return ide_stopped; 598 599 /* retry only "normal" I/O: */ 600 if (!blk_fs_request(rq)) { 601 rq->errors = 1; 602 ide_end_drive_cmd(drive, stat, err); 603 return ide_stopped; 604 } 605 606 if (rq->rq_disk) { 607 ide_driver_t *drv; 608 609 drv = *(ide_driver_t **)rq->rq_disk->private_data; 610 return drv->error(drive, rq, stat, err); 611 } else 612 return __ide_error(drive, rq, stat, err); 613} 614 615EXPORT_SYMBOL_GPL(ide_error); 616 617ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq) 618{ 619 if (drive->media != ide_disk) 620 rq->errors |= ERROR_RESET; 621 622 ide_kill_rq(drive, rq); 623 624 return ide_stopped; 625} 626 627EXPORT_SYMBOL_GPL(__ide_abort); 628 629/** 630 * ide_abort - abort pending IDE operations 631 * @drive: drive the error occurred on 632 * @msg: message to report 633 * 634 * ide_abort kills and cleans up when we are about to do a 635 * host initiated reset on active commands. Longer term we 636 * want handlers to have sensible abort handling themselves 637 * 638 * This differs fundamentally from ide_error because in 639 * this case the command is doing just fine when we 640 * blow it away. 641 */ 642 643ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg) 644{ 645 struct request *rq; 646 647 if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL) 648 return ide_stopped; 649 650 /* retry only "normal" I/O: */ 651 if (!blk_fs_request(rq)) { 652 rq->errors = 1; 653 ide_end_drive_cmd(drive, BUSY_STAT, 0); 654 return ide_stopped; 655 } 656 657 if (rq->rq_disk) { 658 ide_driver_t *drv; 659 660 drv = *(ide_driver_t **)rq->rq_disk->private_data; 661 return drv->abort(drive, rq); 662 } else 663 return __ide_abort(drive, rq); 664} 665 666/** 667 * ide_cmd - issue a simple drive command 668 * @drive: drive the command is for 669 * @cmd: command byte 670 * @nsect: sector byte 671 * @handler: handler for the command completion 672 * 673 * Issue a simple drive command with interrupts. 674 * The drive must be selected beforehand. 675 */ 676 677static void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, 678 ide_handler_t *handler) 679{ 680 ide_hwif_t *hwif = HWIF(drive); 681 if (IDE_CONTROL_REG) 682 hwif->OUTB(drive->ctl,IDE_CONTROL_REG); /* clear nIEN */ 683 SELECT_MASK(drive,0); 684 hwif->OUTB(nsect,IDE_NSECTOR_REG); 685 ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL); 686} 687 688/** 689 * drive_cmd_intr - drive command completion interrupt 690 * @drive: drive the completion interrupt occurred on 691 * 692 * drive_cmd_intr() is invoked on completion of a special DRIVE_CMD. 693 * We do any necessary data reading and then wait for the drive to 694 * go non busy. 
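 * (we poll the status register, pausing briefly between reads).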
At that point we may read the error data and complete 695 * the request 696 */ 697 698static ide_startstop_t drive_cmd_intr (ide_drive_t *drive) 699{ 700 struct request *rq = HWGROUP(drive)->rq; 701 ide_hwif_t *hwif = HWIF(drive); 702 u8 *args = (u8 *) rq->buffer; 703 u8 stat = hwif->INB(IDE_STATUS_REG); 704 int retries = 10; 705 706 local_irq_enable_in_hardirq(); 707 if ((stat & DRQ_STAT) && args && args[3]) { 708 u8 io_32bit = drive->io_32bit; 709 drive->io_32bit = 0; 710 hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS); 711 drive->io_32bit = io_32bit; 712 while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--) 713 udelay(100); 714 } 715 716 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) 717 return ide_error(drive, "drive_cmd", stat); 718 /* calls ide_end_drive_cmd */ 719 ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG)); 720 return ide_stopped; 721} 722 723static void ide_init_specify_cmd(ide_drive_t *drive, ide_task_t *task) 724{ 725 task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect; 726 task->tfRegister[IDE_SECTOR_OFFSET] = drive->sect; 727 task->tfRegister[IDE_LCYL_OFFSET] = drive->cyl; 728 task->tfRegister[IDE_HCYL_OFFSET] = drive->cyl>>8; 729 task->tfRegister[IDE_SELECT_OFFSET] = ((drive->head-1)|drive->select.all)&0xBF; 730 task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SPECIFY; 731 732 task->handler = &set_geometry_intr; 733} 734 735static void ide_init_restore_cmd(ide_drive_t *drive, ide_task_t *task) 736{ 737 task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect; 738 task->tfRegister[IDE_COMMAND_OFFSET] = WIN_RESTORE; 739 740 task->handler = &recal_intr; 741} 742 743static void ide_init_setmult_cmd(ide_drive_t *drive, ide_task_t *task) 744{ 745 task->tfRegister[IDE_NSECTOR_OFFSET] = drive->mult_req; 746 task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETMULT; 747 748 task->handler = &set_multmode_intr; 749} 750 751static ide_startstop_t ide_disk_special(ide_drive_t *drive) 752{ 753 special_t *s = &drive->special; 754 ide_task_t args; 755 756 memset(&args, 0, sizeof(ide_task_t)); 757 args.command_type = IDE_DRIVE_TASK_NO_DATA; 758 759 if (s->b.set_geometry) { 760 s->b.set_geometry = 0; 761 ide_init_specify_cmd(drive, &args); 762 } else if (s->b.recalibrate) { 763 s->b.recalibrate = 0; 764 ide_init_restore_cmd(drive, &args); 765 } else if (s->b.set_multmode) { 766 s->b.set_multmode = 0; 767 if (drive->mult_req > drive->id->max_multsect) 768 drive->mult_req = drive->id->max_multsect; 769 ide_init_setmult_cmd(drive, &args); 770 } else if (s->all) { 771 int special = s->all; 772 s->all = 0; 773 printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special); 774 return ide_stopped; 775 } 776 777 do_rw_taskfile(drive, &args); 778 779 return ide_started; 780} 781 782/** 783 * do_special - issue some special commands 784 * @drive: drive the command is for 785 * 786 * do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT 787 * commands to a drive. It used to do much more, but has been scaled 788 * back. 
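 * These days set_tune requests are handled right here, while the
 * remaining special flags are passed to ide_disk_special() for disks
 * and simply cleared for other media.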
789 */ 790 791static ide_startstop_t do_special (ide_drive_t *drive) 792{ 793 special_t *s = &drive->special; 794 795#ifdef DEBUG 796 printk("%s: do_special: 0x%02x\n", drive->name, s->all); 797#endif 798 if (s->b.set_tune) { 799 s->b.set_tune = 0; 800 if (HWIF(drive)->tuneproc != NULL) 801 HWIF(drive)->tuneproc(drive, drive->tune_req); 802 return ide_stopped; 803 } else { 804 if (drive->media == ide_disk) 805 return ide_disk_special(drive); 806 807 s->all = 0; 808 drive->mult_req = 0; 809 return ide_stopped; 810 } 811} 812 813void ide_map_sg(ide_drive_t *drive, struct request *rq) 814{ 815 ide_hwif_t *hwif = drive->hwif; 816 struct scatterlist *sg = hwif->sg_table; 817 818 if (hwif->sg_mapped) /* needed by ide-scsi */ 819 return; 820 821 if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) { 822 hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg); 823 } else { 824 sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE); 825 hwif->sg_nents = 1; 826 } 827} 828 829EXPORT_SYMBOL_GPL(ide_map_sg); 830 831void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq) 832{ 833 ide_hwif_t *hwif = drive->hwif; 834 835 hwif->nsect = hwif->nleft = rq->nr_sectors; 836 hwif->cursg = hwif->cursg_ofs = 0; 837} 838 839EXPORT_SYMBOL_GPL(ide_init_sg_cmd); 840 841/** 842 * execute_drive_command - issue special drive command 843 * @drive: the drive to issue the command on 844 * @rq: the request structure holding the command 845 * 846 * execute_drive_cmd() issues a special drive command, usually 847 * initiated by ioctl() from the external hdparm program. The 848 * command can be a drive command, drive task or taskfile 849 * operation. Weirdly you can call it with NULL to wait for 850 * all commands to finish. Don't do this as that is due to change 851 */ 852 853static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, 854 struct request *rq) 855{ 856 ide_hwif_t *hwif = HWIF(drive); 857 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 858 ide_task_t *args = rq->special; 859 860 if (!args) 861 goto done; 862 863 hwif->data_phase = args->data_phase; 864 865 switch (hwif->data_phase) { 866 case TASKFILE_MULTI_OUT: 867 case TASKFILE_OUT: 868 case TASKFILE_MULTI_IN: 869 case TASKFILE_IN: 870 ide_init_sg_cmd(drive, rq); 871 ide_map_sg(drive, rq); 872 default: 873 break; 874 } 875 876 if (args->tf_out_flags.all != 0) 877 return flagged_taskfile(drive, args); 878 return do_rw_taskfile(drive, args); 879 } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) { 880 u8 *args = rq->buffer; 881 u8 sel; 882 883 if (!args) 884 goto done; 885#ifdef DEBUG 886 printk("%s: DRIVE_TASK_CMD ", drive->name); 887 printk("cmd=0x%02x ", args[0]); 888 printk("fr=0x%02x ", args[1]); 889 printk("ns=0x%02x ", args[2]); 890 printk("sc=0x%02x ", args[3]); 891 printk("lcyl=0x%02x ", args[4]); 892 printk("hcyl=0x%02x ", args[5]); 893 printk("sel=0x%02x\n", args[6]); 894#endif 895 hwif->OUTB(args[1], IDE_FEATURE_REG); 896 hwif->OUTB(args[3], IDE_SECTOR_REG); 897 hwif->OUTB(args[4], IDE_LCYL_REG); 898 hwif->OUTB(args[5], IDE_HCYL_REG); 899 sel = (args[6] & ~0x10); 900 if (drive->select.b.unit) 901 sel |= 0x10; 902 hwif->OUTB(sel, IDE_SELECT_REG); 903 ide_cmd(drive, args[0], args[2], &drive_cmd_intr); 904 return ide_started; 905 } else if (rq->cmd_type == REQ_TYPE_ATA_CMD) { 906 u8 *args = rq->buffer; 907 908 if (!args) 909 goto done; 910#ifdef DEBUG 911 printk("%s: DRIVE_CMD ", drive->name); 912 printk("cmd=0x%02x ", args[0]); 913 printk("sc=0x%02x ", args[1]); 914 printk("fr=0x%02x ", args[2]); 915 printk("xx=0x%02x\n", args[3]); 916#endif 917 if (args[0] == 
WIN_SMART) { 918 hwif->OUTB(0x4f, IDE_LCYL_REG); 919 hwif->OUTB(0xc2, IDE_HCYL_REG); 920 hwif->OUTB(args[2],IDE_FEATURE_REG); 921 hwif->OUTB(args[1],IDE_SECTOR_REG); 922 ide_cmd(drive, args[0], args[3], &drive_cmd_intr); 923 return ide_started; 924 } 925 hwif->OUTB(args[2],IDE_FEATURE_REG); 926 ide_cmd(drive, args[0], args[1], &drive_cmd_intr); 927 return ide_started; 928 } 929 930done: 931 /* 932 * NULL is actually a valid way of waiting for 933 * all current requests to be flushed from the queue. 934 */ 935#ifdef DEBUG 936 printk("%s: DRIVE_CMD (null)\n", drive->name); 937#endif 938 ide_end_drive_cmd(drive, 939 hwif->INB(IDE_STATUS_REG), 940 hwif->INB(IDE_ERROR_REG)); 941 return ide_stopped; 942} 943 944static void ide_check_pm_state(ide_drive_t *drive, struct request *rq) 945{ 946 struct request_pm_state *pm = rq->data; 947 948 if (blk_pm_suspend_request(rq) && 949 pm->pm_step == ide_pm_state_start_suspend) 950 /* Mark drive blocked when starting the suspend sequence. */ 951 drive->blocked = 1; 952 else if (blk_pm_resume_request(rq) && 953 pm->pm_step == ide_pm_state_start_resume) { 954 /* 955 * The first thing we do on wakeup is to wait for BSY bit to 956 * go away (with a looong timeout) as a drive on this hwif may 957 * just be POSTing itself. 958 * We do that before even selecting as the "other" device on 959 * the bus may be broken enough to walk on our toes at this 960 * point. 961 */ 962 int rc; 963#ifdef DEBUG_PM 964 printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name); 965#endif 966 rc = ide_wait_not_busy(HWIF(drive), 35000); 967 if (rc) 968 printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name); 969 SELECT_DRIVE(drive); 970 HWIF(drive)->OUTB(8, HWIF(drive)->io_ports[IDE_CONTROL_OFFSET]); 971 rc = ide_wait_not_busy(HWIF(drive), 100000); 972 if (rc) 973 printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name); 974 } 975} 976 977 978static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) 979{ 980 ide_startstop_t startstop; 981 sector_t block; 982 983 BUG_ON(!blk_rq_started(rq)); 984 985#ifdef DEBUG 986 printk("%s: start_request: current=0x%08lx\n", 987 HWIF(drive)->name, (unsigned long) rq); 988#endif 989 990 /* bail early if we've exceeded max_failures */ 991 if (drive->max_failures && (drive->failures > drive->max_failures)) { 992 goto kill_rq; 993 } 994 995 block = rq->sector; 996 if (blk_fs_request(rq) && 997 (drive->media == ide_disk || drive->media == ide_floppy)) { 998 block += drive->sect0; 999 } 1000 /* Yecch - this will shift the entire interval, 1001 possibly killing some innocent following sector */ 1002 if (block == 0 && drive->remap_0_to_1 == 1) 1003 block = 1; /* redirect MBR access to EZ-Drive partn table */ 1004 1005 if (blk_pm_request(rq)) 1006 ide_check_pm_state(drive, rq); 1007 1008 SELECT_DRIVE(drive); 1009 if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) { 1010 printk(KERN_ERR "%s: drive not ready for command\n", drive->name); 1011 return startstop; 1012 } 1013 if (!drive->special.all) { 1014 ide_driver_t *drv; 1015 1016 /* 1017 * We reset the drive so we need to issue a SETFEATURES. 1018 * Do it _after_ do_special() restored device parameters. 
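			 * (a current_speed of 0xff means the transfer speed
			 * still has to be (re)programmed)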
1019 */ 1020 if (drive->current_speed == 0xff) 1021 ide_config_drive_speed(drive, drive->desired_speed); 1022 1023 if (rq->cmd_type == REQ_TYPE_ATA_CMD || 1024 rq->cmd_type == REQ_TYPE_ATA_TASK || 1025 rq->cmd_type == REQ_TYPE_ATA_TASKFILE) 1026 return execute_drive_cmd(drive, rq); 1027 else if (blk_pm_request(rq)) { 1028 struct request_pm_state *pm = rq->data; 1029#ifdef DEBUG_PM 1030 printk("%s: start_power_step(step: %d)\n", 1031 drive->name, rq->pm->pm_step); 1032#endif 1033 startstop = ide_start_power_step(drive, rq); 1034 if (startstop == ide_stopped && 1035 pm->pm_step == ide_pm_state_completed) 1036 ide_complete_pm_request(drive, rq); 1037 return startstop; 1038 } 1039 1040 drv = *(ide_driver_t **)rq->rq_disk->private_data; 1041 return drv->do_request(drive, rq, block); 1042 } 1043 return do_special(drive); 1044kill_rq: 1045 ide_kill_rq(drive, rq); 1046 return ide_stopped; 1047} 1048 1049/** 1050 * ide_stall_queue - pause an IDE device 1051 * @drive: drive to stall 1052 * @timeout: time to stall for (jiffies) 1053 * 1054 * ide_stall_queue() can be used by a drive to give excess bandwidth back 1055 * to the hwgroup by sleeping for timeout jiffies. 1056 */ 1057 1058void ide_stall_queue (ide_drive_t *drive, unsigned long timeout) 1059{ 1060 if (timeout > WAIT_WORSTCASE) 1061 timeout = WAIT_WORSTCASE; 1062 drive->sleep = timeout + jiffies; 1063 drive->sleeping = 1; 1064} 1065 1066EXPORT_SYMBOL(ide_stall_queue); 1067 1068#define WAKEUP(drive) ((drive)->service_start + 2 * (drive)->service_time) 1069 1070/** 1071 * choose_drive - select a drive to service 1072 * @hwgroup: hardware group to select on 1073 * 1074 * choose_drive() selects the next drive which will be serviced. 1075 * This is necessary because the IDE layer can't issue commands 1076 * to both drives on the same cable, unlike SCSI. 1077 */ 1078 1079static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup) 1080{ 1081 ide_drive_t *drive, *best; 1082 1083repeat: 1084 best = NULL; 1085 drive = hwgroup->drive; 1086 1087 /* 1088 * drive is doing pre-flush, ordered write, post-flush sequence. even 1089 * though that is 3 requests, it must be seen as a single transaction. 1090 * we must not preempt this drive until that is complete 1091 */ 1092 if (blk_queue_flushing(drive->queue)) { 1093 /* 1094 * small race where queue could get replugged during 1095 * the 3-request flush cycle, just yank the plug since 1096 * we want it to finish asap 1097 */ 1098 blk_remove_plug(drive->queue); 1099 return drive; 1100 } 1101 1102 do { 1103 if ((!drive->sleeping || time_after_eq(jiffies, drive->sleep)) 1104 && !elv_queue_empty(drive->queue)) { 1105 if (!best 1106 || (drive->sleeping && (!best->sleeping || time_before(drive->sleep, best->sleep))) 1107 || (!best->sleeping && time_before(WAKEUP(drive), WAKEUP(best)))) 1108 { 1109 if (!blk_queue_plugged(drive->queue)) 1110 best = drive; 1111 } 1112 } 1113 } while ((drive = drive->next) != hwgroup->drive); 1114 if (best && best->nice1 && !best->sleeping && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) { 1115 long t = (signed long)(WAKEUP(best) - jiffies); 1116 if (t >= WAIT_MIN_SLEEP) { 1117 /* 1118 * We *may* have some time to spare, but first let's see if 1119 * someone can potentially benefit from our nice mood today.. 
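			 * i.e. another non-sleeping drive on this hwgroup whose
			 * wakeup time (WAKEUP) falls within the time we have to
			 * spare; if so, stall "best" for a while and pick again.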
			 */
			drive = best->next;
			do {
				if (!drive->sleeping
				 && time_before(jiffies - best->service_time, WAKEUP(drive))
				 && time_before(WAKEUP(drive), jiffies + t))
				{
					ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
					goto repeat;
				}
			} while ((drive = drive->next) != best);
		}
	}
	return best;
}

/*
 * Issue a new request to a drive from hwgroup
 * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
 *
 * A hwgroup is a serialized group of IDE interfaces. Usually there is
 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
 * may have both interfaces in a single hwgroup to "serialize" access.
 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped
 * together into one hwgroup for serialized access.
 *
 * Note also that several hwgroups can end up sharing a single IRQ,
 * possibly along with many other devices. This is especially common in
 * PCI-based systems with off-board IDE controller cards.
 *
 * The IDE driver uses the single global ide_lock spinlock to protect
 * access to the request queues, and to protect the hwgroup->busy flag.
 *
 * The first thread into the driver for a particular hwgroup sets the
 * hwgroup->busy flag to indicate that this hwgroup is now active,
 * and then initiates processing of the top request from the request queue.
 *
 * Other threads attempting entry notice the busy setting, and will simply
 * queue their new requests and exit immediately. Note that hwgroup->busy
 * remains set even when the driver is merely awaiting the next interrupt.
 * Thus, the meaning is "this hwgroup is busy processing a request".
 *
 * When processing of a request completes, the completing thread or IRQ-handler
 * will start the next request from the queue. If no more work remains,
 * the driver will clear the hwgroup->busy flag and exit.
 *
 * The ide_lock (spinlock) is used to protect all access to the
 * hwgroup->busy flag, but is otherwise not needed for most processing in
 * the driver. This makes the driver much friendlier to shared IRQs
 * than previous designs, while remaining 100% (?) SMP safe and capable.
 */
static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
	ide_drive_t	*drive;
	ide_hwif_t	*hwif;
	struct request	*rq;
	ide_startstop_t	startstop;
	int		loops = 0;

	/* for atari only: POSSIBLY BROKEN HERE(?) */
	ide_get_lock(ide_intr, hwgroup);

	/* caller must own ide_lock */
	BUG_ON(!irqs_disabled());

	while (!hwgroup->busy) {
		hwgroup->busy = 1;
		drive = choose_drive(hwgroup);
		if (drive == NULL) {
			int sleeping = 0;
			unsigned long sleep = 0; /* shut up, gcc */
			hwgroup->rq = NULL;
			drive = hwgroup->drive;
			do {
				if (drive->sleeping && (!sleeping || time_before(drive->sleep, sleep))) {
					sleeping = 1;
					sleep = drive->sleep;
				}
			} while ((drive = drive->next) != hwgroup->drive);
			if (sleeping) {
		/*
		 * Take a short snooze, and then wake up this hwgroup again.
		 * This gives other hwgroups a chance to play fairly with us,
		 * just in case there are big differences in relative
		 * throughputs.. don't want to hog the cpu too much.
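		 * The snooze is clamped to lie at least WAIT_MIN_SLEEP jiffies
		 * in the future.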
1205 */ 1206 if (time_before(sleep, jiffies + WAIT_MIN_SLEEP)) 1207 sleep = jiffies + WAIT_MIN_SLEEP; 1208 if (timer_pending(&hwgroup->timer)) 1209 printk(KERN_CRIT "ide_set_handler: timer already active\n"); 1210 /* so that ide_timer_expiry knows what to do */ 1211 hwgroup->sleeping = 1; 1212 hwgroup->req_gen_timer = hwgroup->req_gen; 1213 mod_timer(&hwgroup->timer, sleep); 1214 /* we purposely leave hwgroup->busy==1 1215 * while sleeping */ 1216 } else { 1217 /* Ugly, but how can we sleep for the lock 1218 * otherwise? perhaps from tq_disk? 1219 */ 1220 1221 /* for atari only */ 1222 ide_release_lock(); 1223 hwgroup->busy = 0; 1224 } 1225 1226 /* no more work for this hwgroup (for now) */ 1227 return; 1228 } 1229 again: 1230 hwif = HWIF(drive); 1231 if (hwgroup->hwif->sharing_irq && 1232 hwif != hwgroup->hwif && 1233 hwif->io_ports[IDE_CONTROL_OFFSET]) { 1234 /* set nIEN for previous hwif */ 1235 SELECT_INTERRUPT(drive); 1236 } 1237 hwgroup->hwif = hwif; 1238 hwgroup->drive = drive; 1239 drive->sleeping = 0; 1240 drive->service_start = jiffies; 1241 1242 if (blk_queue_plugged(drive->queue)) { 1243 printk(KERN_ERR "ide: huh? queue was plugged!\n"); 1244 break; 1245 } 1246 1247 /* 1248 * we know that the queue isn't empty, but this can happen 1249 * if the q->prep_rq_fn() decides to kill a request 1250 */ 1251 rq = elv_next_request(drive->queue); 1252 if (!rq) { 1253 hwgroup->busy = 0; 1254 break; 1255 } 1256 1257 /* 1258 * Sanity: don't accept a request that isn't a PM request 1259 * if we are currently power managed. This is very important as 1260 * blk_stop_queue() doesn't prevent the elv_next_request() 1261 * above to return us whatever is in the queue. Since we call 1262 * ide_do_request() ourselves, we end up taking requests while 1263 * the queue is blocked... 1264 * 1265 * We let requests forced at head of queue with ide-preempt 1266 * though. I hope that doesn't happen too much, hopefully not 1267 * unless the subdriver triggers such a thing in its own PM 1268 * state machine. 1269 * 1270 * We count how many times we loop here to make sure we service 1271 * all drives in the hwgroup without looping for ever 1272 */ 1273 if (drive->blocked && !blk_pm_request(rq) && !(rq->cmd_flags & REQ_PREEMPT)) { 1274 drive = drive->next ? drive->next : hwgroup->drive; 1275 if (loops++ < 4 && !blk_queue_plugged(drive->queue)) 1276 goto again; 1277 /* We clear busy, there should be no pending ATA command at this point. */ 1278 hwgroup->busy = 0; 1279 break; 1280 } 1281 1282 hwgroup->rq = rq; 1283 1284 /* 1285 * Some systems have trouble with IDE IRQs arriving while 1286 * the driver is still setting things up. So, here we disable 1287 * the IRQ used by this interface while the request is being started. 1288 * This may look bad at first, but pretty much the same thing 1289 * happens anyway when any interrupt comes in, IDE or otherwise 1290 * -- the kernel masks the IRQ while it is being handled. 
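		 * We only need to do the masking ourselves when the interface
		 * uses an IRQ other than the one (masked_irq) that our caller
		 * has already masked.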
1291 */ 1292 if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq) 1293 disable_irq_nosync(hwif->irq); 1294 spin_unlock(&ide_lock); 1295 local_irq_enable_in_hardirq(); 1296 /* allow other IRQs while we start this request */ 1297 startstop = start_request(drive, rq); 1298 spin_lock_irq(&ide_lock); 1299 if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq) 1300 enable_irq(hwif->irq); 1301 if (startstop == ide_stopped) 1302 hwgroup->busy = 0; 1303 } 1304} 1305 1306/* 1307 * Passes the stuff to ide_do_request 1308 */ 1309void do_ide_request(request_queue_t *q) 1310{ 1311 ide_drive_t *drive = q->queuedata; 1312 1313 ide_do_request(HWGROUP(drive), IDE_NO_IRQ); 1314} 1315 1316/* 1317 * un-busy the hwgroup etc, and clear any pending DMA status. we want to 1318 * retry the current request in pio mode instead of risking tossing it 1319 * all away 1320 */ 1321static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) 1322{ 1323 ide_hwif_t *hwif = HWIF(drive); 1324 struct request *rq; 1325 ide_startstop_t ret = ide_stopped; 1326 1327 /* 1328 * end current dma transaction 1329 */ 1330 1331 if (error < 0) { 1332 printk(KERN_WARNING "%s: DMA timeout error\n", drive->name); 1333 (void)HWIF(drive)->ide_dma_end(drive); 1334 ret = ide_error(drive, "dma timeout error", 1335 hwif->INB(IDE_STATUS_REG)); 1336 } else { 1337 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name); 1338 (void) hwif->ide_dma_timeout(drive); 1339 } 1340 1341 /* 1342 * disable dma for now, but remember that we did so because of 1343 * a timeout -- we'll reenable after we finish this next request 1344 * (or rather the first chunk of it) in pio. 1345 */ 1346 drive->retry_pio++; 1347 drive->state = DMA_PIO_RETRY; 1348 hwif->dma_off_quietly(drive); 1349 1350 /* 1351 * un-busy drive etc (hwgroup->busy is cleared on return) and 1352 * make sure request is sane 1353 */ 1354 rq = HWGROUP(drive)->rq; 1355 1356 if (!rq) 1357 goto out; 1358 1359 HWGROUP(drive)->rq = NULL; 1360 1361 rq->errors = 0; 1362 1363 if (!rq->bio) 1364 goto out; 1365 1366 rq->sector = rq->bio->bi_sector; 1367 rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9; 1368 rq->hard_cur_sectors = rq->current_nr_sectors; 1369 rq->buffer = bio_data(rq->bio); 1370out: 1371 return ret; 1372} 1373 1374/** 1375 * ide_timer_expiry - handle lack of an IDE interrupt 1376 * @data: timer callback magic (hwgroup) 1377 * 1378 * An IDE command has timed out before the expected drive return 1379 * occurred. At this point we attempt to clean up the current 1380 * mess. If the current handler includes an expiry handler then 1381 * we invoke the expiry handler, and providing it is happy the 1382 * work is done. If that fails we apply generic recovery rules 1383 * invoking the handler and checking the drive DMA status. We 1384 * have an excessively incestuous relationship with the DMA 1385 * logic that wants cleaning up. 1386 */ 1387 1388void ide_timer_expiry (unsigned long data) 1389{ 1390 ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data; 1391 ide_handler_t *handler; 1392 ide_expiry_t *expiry; 1393 unsigned long flags; 1394 unsigned long wait = -1; 1395 1396 spin_lock_irqsave(&ide_lock, flags); 1397 1398 if (((handler = hwgroup->handler) == NULL) || 1399 (hwgroup->req_gen != hwgroup->req_gen_timer)) { 1400 /* 1401 * Either a marginal timeout occurred 1402 * (got the interrupt just as timer expired), 1403 * or we were "sleeping" to give other devices a chance. 1404 * Either way, we don't really want to complain about anything. 
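		 * If we had been sleeping, clear hwgroup->sleeping and
		 * hwgroup->busy so the queue can be restarted at the bottom
		 * of this function.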
1405 */ 1406 if (hwgroup->sleeping) { 1407 hwgroup->sleeping = 0; 1408 hwgroup->busy = 0; 1409 } 1410 } else { 1411 ide_drive_t *drive = hwgroup->drive; 1412 if (!drive) { 1413 printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n"); 1414 hwgroup->handler = NULL; 1415 } else { 1416 ide_hwif_t *hwif; 1417 ide_startstop_t startstop = ide_stopped; 1418 if (!hwgroup->busy) { 1419 hwgroup->busy = 1; /* paranoia */ 1420 printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name); 1421 } 1422 if ((expiry = hwgroup->expiry) != NULL) { 1423 /* continue */ 1424 if ((wait = expiry(drive)) > 0) { 1425 /* reset timer */ 1426 hwgroup->timer.expires = jiffies + wait; 1427 hwgroup->req_gen_timer = hwgroup->req_gen; 1428 add_timer(&hwgroup->timer); 1429 spin_unlock_irqrestore(&ide_lock, flags); 1430 return; 1431 } 1432 } 1433 hwgroup->handler = NULL; 1434 /* 1435 * We need to simulate a real interrupt when invoking 1436 * the handler() function, which means we need to 1437 * globally mask the specific IRQ: 1438 */ 1439 spin_unlock(&ide_lock); 1440 hwif = HWIF(drive); 1441#if DISABLE_IRQ_NOSYNC 1442 disable_irq_nosync(hwif->irq); 1443#else 1444 /* disable_irq_nosync ?? */ 1445 disable_irq(hwif->irq); 1446#endif /* DISABLE_IRQ_NOSYNC */ 1447 /* local CPU only, 1448 * as if we were handling an interrupt */ 1449 local_irq_disable(); 1450 if (hwgroup->polling) { 1451 startstop = handler(drive); 1452 } else if (drive_is_ready(drive)) { 1453 if (drive->waiting_for_dma) 1454 (void) hwgroup->hwif->ide_dma_lostirq(drive); 1455 (void)ide_ack_intr(hwif); 1456 printk(KERN_WARNING "%s: lost interrupt\n", drive->name); 1457 startstop = handler(drive); 1458 } else { 1459 if (drive->waiting_for_dma) { 1460 startstop = ide_dma_timeout_retry(drive, wait); 1461 } else 1462 startstop = 1463 ide_error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG)); 1464 } 1465 drive->service_time = jiffies - drive->service_start; 1466 spin_lock_irq(&ide_lock); 1467 enable_irq(hwif->irq); 1468 if (startstop == ide_stopped) 1469 hwgroup->busy = 0; 1470 } 1471 } 1472 ide_do_request(hwgroup, IDE_NO_IRQ); 1473 spin_unlock_irqrestore(&ide_lock, flags); 1474} 1475 1476/** 1477 * unexpected_intr - handle an unexpected IDE interrupt 1478 * @irq: interrupt line 1479 * @hwgroup: hwgroup being processed 1480 * 1481 * There's nothing really useful we can do with an unexpected interrupt, 1482 * other than reading the status register (to clear it), and logging it. 1483 * There should be no way that an irq can happen before we're ready for it, 1484 * so we needn't worry much about losing an "important" interrupt here. 1485 * 1486 * On laptops (and "green" PCs), an unexpected interrupt occurs whenever 1487 * the drive enters "idle", "standby", or "sleep" mode, so if the status 1488 * looks "good", we just ignore the interrupt completely. 1489 * 1490 * This routine assumes __cli() is in effect when called. 1491 * 1492 * If an unexpected interrupt happens on irq15 while we are handling irq14 1493 * and if the two interfaces are "serialized" (CMD640), then it looks like 1494 * we could screw up by interfering with a new request being set up for 1495 * irq15. 1496 * 1497 * In reality, this is a non-issue. The new command is not sent unless 1498 * the drive is ready to accept one, in which case we know the drive is 1499 * not trying to interrupt us. 
And ide_set_handler() is always invoked 1500 * before completing the issuance of any new drive command, so we will not 1501 * be accidentally invoked as a result of any valid command completion 1502 * interrupt. 1503 * 1504 * Note that we must walk the entire hwgroup here. We know which hwif 1505 * is doing the current command, but we don't know which hwif burped 1506 * mysteriously. 1507 */ 1508 1509static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup) 1510{ 1511 u8 stat; 1512 ide_hwif_t *hwif = hwgroup->hwif; 1513 1514 /* 1515 * handle the unexpected interrupt 1516 */ 1517 do { 1518 if (hwif->irq == irq) { 1519 stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 1520 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) { 1521 /* Try to not flood the console with msgs */ 1522 static unsigned long last_msgtime, count; 1523 ++count; 1524 if (time_after(jiffies, last_msgtime + HZ)) { 1525 last_msgtime = jiffies; 1526 printk(KERN_ERR "%s%s: unexpected interrupt, " 1527 "status=0x%02x, count=%ld\n", 1528 hwif->name, 1529 (hwif->next==hwgroup->hwif) ? "" : "(?)", stat, count); 1530 } 1531 } 1532 } 1533 } while ((hwif = hwif->next) != hwgroup->hwif); 1534} 1535 1536/** 1537 * ide_intr - default IDE interrupt handler 1538 * @irq: interrupt number 1539 * @dev_id: hwif group 1540 * @regs: unused weirdness from the kernel irq layer 1541 * 1542 * This is the default IRQ handler for the IDE layer. You should 1543 * not need to override it. If you do be aware it is subtle in 1544 * places 1545 * 1546 * hwgroup->hwif is the interface in the group currently performing 1547 * a command. hwgroup->drive is the drive and hwgroup->handler is 1548 * the IRQ handler to call. As we issue a command the handlers 1549 * step through multiple states, reassigning the handler to the 1550 * next step in the process. Unlike a smart SCSI controller IDE 1551 * expects the main processor to sequence the various transfer 1552 * stages. We also manage a poll timer to catch up with most 1553 * timeout situations. There are still a few where the handlers 1554 * don't ever decide to give up. 1555 * 1556 * The handler eventually returns ide_stopped to indicate the 1557 * request completed. At this point we issue the next request 1558 * on the hwgroup and the process begins again. 1559 */ 1560 1561irqreturn_t ide_intr (int irq, void *dev_id) 1562{ 1563 unsigned long flags; 1564 ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id; 1565 ide_hwif_t *hwif; 1566 ide_drive_t *drive; 1567 ide_handler_t *handler; 1568 ide_startstop_t startstop; 1569 1570 spin_lock_irqsave(&ide_lock, flags); 1571 hwif = hwgroup->hwif; 1572 1573 if (!ide_ack_intr(hwif)) { 1574 spin_unlock_irqrestore(&ide_lock, flags); 1575 return IRQ_NONE; 1576 } 1577 1578 if ((handler = hwgroup->handler) == NULL || hwgroup->polling) { 1579#ifdef CONFIG_BLK_DEV_IDEPCI 1580 if (hwif->pci_dev && !hwif->pci_dev->vendor) 1581#endif /* CONFIG_BLK_DEV_IDEPCI */ 1582 { 1583 /* 1584 * Probably not a shared PCI interrupt, 1585 * so we can safely try to do something about it: 1586 */ 1587 unexpected_intr(irq, hwgroup); 1588#ifdef CONFIG_BLK_DEV_IDEPCI 1589 } else { 1590 /* 1591 * Whack the status register, just in case 1592 * we have a leftover pending IRQ. 1593 */ 1594 (void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 1595#endif /* CONFIG_BLK_DEV_IDEPCI */ 1596 } 1597 spin_unlock_irqrestore(&ide_lock, flags); 1598 return IRQ_NONE; 1599 } 1600 drive = hwgroup->drive; 1601 if (!drive) { 1602 /* 1603 * This should NEVER happen, and there isn't much 1604 * we could do about it here. 
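		 * We simply report the interrupt as handled and return.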
1605 * 1606 * [Note - this can occur if the drive is hot unplugged] 1607 */ 1608 spin_unlock_irqrestore(&ide_lock, flags); 1609 return IRQ_HANDLED; 1610 } 1611 if (!drive_is_ready(drive)) { 1612 /* 1613 * This happens regularly when we share a PCI IRQ with 1614 * another device. Unfortunately, it can also happen 1615 * with some buggy drives that trigger the IRQ before 1616 * their status register is up to date. Hopefully we have 1617 * enough advance overhead that the latter isn't a problem. 1618 */ 1619 spin_unlock_irqrestore(&ide_lock, flags); 1620 return IRQ_NONE; 1621 } 1622 if (!hwgroup->busy) { 1623 hwgroup->busy = 1; /* paranoia */ 1624 printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name); 1625 } 1626 hwgroup->handler = NULL; 1627 hwgroup->req_gen++; 1628 del_timer(&hwgroup->timer); 1629 spin_unlock(&ide_lock); 1630 1631 /* Some controllers might set DMA INTR no matter DMA or PIO; 1632 * bmdma status might need to be cleared even for 1633 * PIO interrupts to prevent spurious/lost irq. 1634 */ 1635 if (hwif->ide_dma_clear_irq && !(drive->waiting_for_dma)) 1636 /* ide_dma_end() needs bmdma status for error checking. 1637 * So, skip clearing bmdma status here and leave it 1638 * to ide_dma_end() if this is dma interrupt. 1639 */ 1640 hwif->ide_dma_clear_irq(drive); 1641 1642 if (drive->unmask) 1643 local_irq_enable_in_hardirq(); 1644 /* service this interrupt, may set handler for next interrupt */ 1645 startstop = handler(drive); 1646 spin_lock_irq(&ide_lock); 1647 1648 /* 1649 * Note that handler() may have set things up for another 1650 * interrupt to occur soon, but it cannot happen until 1651 * we exit from this routine, because it will be the 1652 * same irq as is currently being serviced here, and Linux 1653 * won't allow another of the same (on any CPU) until we return. 1654 */ 1655 drive->service_time = jiffies - drive->service_start; 1656 if (startstop == ide_stopped) { 1657 if (hwgroup->handler == NULL) { /* paranoia */ 1658 hwgroup->busy = 0; 1659 ide_do_request(hwgroup, hwif->irq); 1660 } else { 1661 printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler " 1662 "on exit\n", drive->name); 1663 } 1664 } 1665 spin_unlock_irqrestore(&ide_lock, flags); 1666 return IRQ_HANDLED; 1667} 1668 1669/** 1670 * ide_init_drive_cmd - initialize a drive command request 1671 * @rq: request object 1672 * 1673 * Initialize a request before we fill it in and send it down to 1674 * ide_do_drive_cmd. Commands must be set up by this function. Right 1675 * now it doesn't do a lot, but if that changes abusers will have a 1676 * nasty surprise. 1677 */ 1678 1679void ide_init_drive_cmd (struct request *rq) 1680{ 1681 memset(rq, 0, sizeof(*rq)); 1682 rq->cmd_type = REQ_TYPE_ATA_CMD; 1683 rq->ref_count = 1; 1684} 1685 1686EXPORT_SYMBOL(ide_init_drive_cmd); 1687 1688/** 1689 * ide_do_drive_cmd - issue IDE special command 1690 * @drive: device to issue command 1691 * @rq: request to issue 1692 * @action: action for processing 1693 * 1694 * This function issues a special IDE device request 1695 * onto the request queue. 1696 * 1697 * If action is ide_wait, then the rq is queued at the end of the 1698 * request queue, and the function sleeps until it has been processed. 1699 * This is for use when invoked from an ioctl handler. 1700 * 1701 * If action is ide_preempt, then the rq is queued at the head of 1702 * the request queue, displacing the currently-being-processed 1703 * request and this function returns immediately without waiting 1704 * for the new rq to be completed. 
This is VERY DANGEROUS, and is 1705 * intended for careful use by the ATAPI tape/cdrom driver code. 1706 * 1707 * If action is ide_end, then the rq is queued at the end of the 1708 * request queue, and the function returns immediately without waiting 1709 * for the new rq to be completed. This is again intended for careful 1710 * use by the ATAPI tape/cdrom driver code. 1711 */ 1712 1713int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action) 1714{ 1715 unsigned long flags; 1716 ide_hwgroup_t *hwgroup = HWGROUP(drive); 1717 DECLARE_COMPLETION_ONSTACK(wait); 1718 int where = ELEVATOR_INSERT_BACK, err; 1719 int must_wait = (action == ide_wait || action == ide_head_wait); 1720 1721 rq->errors = 0; 1722 1723 /* 1724 * we need to hold an extra reference to request for safe inspection 1725 * after completion 1726 */ 1727 if (must_wait) { 1728 rq->ref_count++; 1729 rq->end_io_data = &wait; 1730 rq->end_io = blk_end_sync_rq; 1731 } 1732 1733 spin_lock_irqsave(&ide_lock, flags); 1734 if (action == ide_preempt) 1735 hwgroup->rq = NULL; 1736 if (action == ide_preempt || action == ide_head_wait) { 1737 where = ELEVATOR_INSERT_FRONT; 1738 rq->cmd_flags |= REQ_PREEMPT; 1739 } 1740 __elv_add_request(drive->queue, rq, where, 0); 1741 ide_do_request(hwgroup, IDE_NO_IRQ); 1742 spin_unlock_irqrestore(&ide_lock, flags); 1743 1744 err = 0; 1745 if (must_wait) { 1746 wait_for_completion(&wait); 1747 if (rq->errors) 1748 err = -EIO; 1749 1750 blk_put_request(rq); 1751 } 1752 1753 return err; 1754} 1755 1756EXPORT_SYMBOL(ide_do_drive_cmd); 1757