Cross-reference for /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/ide/

Lines Matching defs:rq

57 static int __ide_end_request(ide_drive_t *drive, struct request *rq,
66 if (blk_noretry_request(rq) && end_io_error(uptodate))
67 nr_sectors = rq->hard_nr_sectors;
69 if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
70 rq->errors = -EIO;
81 if (!end_that_request_first(rq, uptodate, nr_sectors)) {
82 add_disk_randomness(rq->rq_disk);
83 if (!list_empty(&rq->queuelist))
84 blkdev_dequeue_request(rq);
85 HWGROUP(drive)->rq = NULL;
86 end_that_request_last(rq, uptodate);
106 struct request *rq;
115 rq = HWGROUP(drive)->rq;
118 nr_sectors = rq->hard_cur_sectors;
120 ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
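Taken together, the matches at lines 57-86 trace the whole request-completion path. Below is a sketch that reassembles them into one function; the elided lines (DMA teardown and the return-value bookkeeping) are paraphrased in comments and are assumptions about this vendor tree, not verbatim source.

static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, unsigned int nr_sectors)
{
	int ret = 1;

	/* a failed no-retry request is completed in full, not chunk by chunk */
	if (blk_noretry_request(rq) && end_io_error(uptodate))
		nr_sectors = rq->hard_nr_sectors;

	/* flag failed special (non-fs) requests with -EIO */
	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
		rq->errors = -EIO;

	/* ... DMA engine teardown elided (assumption) ... */

	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
		/* no sectors left: detach the request and finish it */
		add_disk_randomness(rq->rq_disk);
		if (!list_empty(&rq->queuelist))
			blkdev_dequeue_request(rq);
		HWGROUP(drive)->rq = NULL;
		end_that_request_last(rq, uptodate);
		ret = 0;
	}
	return ret;
}
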
142 static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error)
144 struct request_pm_state *pm = rq->data;
168 static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
170 struct request_pm_state *pm = rq->data;
171 ide_task_t *args = rq->special;
181 ide_complete_power_step(drive, rq, 0, 0);
207 ide_complete_power_step(drive, rq, 0, 0);
249 int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
257 BUG_ON(!blk_rq_started(rq));
263 if (blk_noretry_request(rq) && end_io_error(uptodate))
264 nr_sectors = rq->hard_nr_sectors;
266 if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
267 rq->errors = -EIO;
278 if (!end_that_request_first(rq, uptodate, nr_sectors)) {
279 add_disk_randomness(rq->rq_disk);
280 if (blk_rq_tagged(rq))
281 blk_queue_end_tag(drive->queue, rq);
282 end_that_request_last(rq, uptodate);
294 * @rq: request
299 static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
305 blk_pm_suspend_request(rq) ? "suspend" : "resume");
308 if (blk_pm_suspend_request(rq)) {
314 blkdev_dequeue_request(rq);
315 HWGROUP(drive)->rq = NULL;
316 end_that_request_last(rq, 1);
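Lines 299-316 cover most of ide_complete_pm_request(), which retires a power-management request without going through the normal completion path. A plausible reconstruction follows; the locking and the resume branch are filled in as assumptions (ide_lock, blk_stop_queue() and blk_start_queue() are standard 2.6 interfaces, but their use here is inferred from the suspend/resume printk at line 305).

static void ide_complete_pm_request(ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;

	spin_lock_irqsave(&ide_lock, flags);	/* assumed locking */
	if (blk_pm_suspend_request(rq)) {
		blk_stop_queue(drive->queue);	/* no further I/O while suspended */
	} else {
		drive->blocked = 0;
		blk_start_queue(drive->queue);	/* resume normal I/O */
	}
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	end_that_request_last(rq, 1);		/* PM requests always complete "ok" */
	spin_unlock_irqrestore(&ide_lock, flags);
}
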
370 struct request *rq;
373 rq = HWGROUP(drive)->rq;
376 if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
377 u8 *args = (u8 *) rq->buffer;
378 if (rq->errors == 0)
379 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
386 } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
387 u8 *args = (u8 *) rq->buffer;
388 if (rq->errors == 0)
389 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
400 } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
401 ide_task_t *args = (ide_task_t *) rq->special;
402 if (rq->errors == 0)
403 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
430 } else if (blk_pm_request(rq)) {
431 struct request_pm_state *pm = rq->data;
434 drive->name, rq->pm->pm_step, stat, err);
436 ide_complete_power_step(drive, rq, stat, err);
438 ide_complete_pm_request(drive, rq);
443 blkdev_dequeue_request(rq);
444 HWGROUP(drive)->rq = NULL;
445 rq->errors = err;
446 end_that_request_last(rq, !rq->errors);
477 static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
479 if (rq->rq_disk) {
482 drv = *(ide_driver_t **)rq->rq_disk->private_data;
488 static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
494 rq->errors |= ERROR_RESET;
507 rq->errors = ERROR_MAX;
510 rq->errors |= ERROR_RECAL;
514 if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ && hwif->err_stops_fifo == 0)
517 if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
518 ide_kill_rq(drive, rq);
523 rq->errors |= ERROR_RESET;
525 if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
526 ++rq->errors;
530 if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
533 ++rq->errors;
538 static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
544 rq->errors |= ERROR_RESET;
553 if (rq->errors >= ERROR_MAX) {
554 ide_kill_rq(drive, rq);
556 if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
557 ++rq->errors;
560 ++rq->errors;
567 __ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
570 return ide_ata_error(drive, rq, stat, err);
571 return ide_atapi_error(drive, rq, stat, err);
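Lines 488-571 show the retry ladder shared by ide_ata_error() and ide_atapi_error(): rq->errors doubles as a retry counter whose low bits select the recovery action for the next pass. A condensed sketch of the ATA side is below; the ERROR_* values (ERROR_MAX = 8, ERROR_RESET = 3, ERROR_RECAL = 1) come from the stock 2.6 <linux/ide.h> and are an assumption about this exact tree.

/* tail of ide_ata_error(), condensed: */
if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
	ide_kill_rq(drive, rq);			/* out of retries: fail the rq */
	return ide_stopped;
}
if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
	++rq->errors;
	return ide_do_reset(drive);		/* every 4th retry: full reset */
}
if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
	drive->special.b.recalibrate = 1;	/* every 2nd retry: recalibrate */
++rq->errors;
return ide_stopped;
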
591 struct request *rq;
596 if ((rq = HWGROUP(drive)->rq) == NULL)
600 if (!blk_fs_request(rq)) {
601 rq->errors = 1;
606 if (rq->rq_disk) {
609 drv = *(ide_driver_t **)rq->rq_disk->private_data;
610 return drv->error(drive, rq, stat, err);
612 return __ide_error(drive, rq, stat, err);
617 ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq)
620 rq->errors |= ERROR_RESET;
622 ide_kill_rq(drive, rq);
645 struct request *rq;
647 if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
651 if (!blk_fs_request(rq)) {
652 rq->errors = 1;
657 if (rq->rq_disk) {
660 drv = *(ide_driver_t **)rq->rq_disk->private_data;
661 return drv->abort(drive, rq);
663 return __ide_abort(drive, rq);
700 struct request *rq = HWGROUP(drive)->rq;
702 u8 *args = (u8 *) rq->buffer;
813 void ide_map_sg(ide_drive_t *drive, struct request *rq)
821 if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
822 hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
824 sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
831 void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
835 hwif->nsect = hwif->nleft = rq->nr_sectors;
844 * @rq: the request structure holding the command
854 struct request *rq)
857 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
858 ide_task_t *args = rq->special;
870 ide_init_sg_cmd(drive, rq);
871 ide_map_sg(drive, rq);
879 } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
880 u8 *args = rq->buffer;
905 } else if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
906 u8 *args = rq->buffer;
944 static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
946 struct request_pm_state *pm = rq->data;
948 if (blk_pm_suspend_request(rq) &&
952 else if (blk_pm_resume_request(rq) &&
978 static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
983 BUG_ON(!blk_rq_started(rq));
987 HWIF(drive)->name, (unsigned long) rq);
995 block = rq->sector;
996 if (blk_fs_request(rq) &&
1005 if (blk_pm_request(rq))
1006 ide_check_pm_state(drive, rq);
1023 if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
1024 rq->cmd_type == REQ_TYPE_ATA_TASK ||
1025 rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
1026 return execute_drive_cmd(drive, rq);
1027 else if (blk_pm_request(rq)) {
1028 struct request_pm_state *pm = rq->data;
1031 drive->name, rq->pm->pm_step);
1033 startstop = ide_start_power_step(drive, rq);
1036 ide_complete_pm_request(drive, rq);
1040 drv = *(ide_driver_t **)rq->rq_disk->private_data;
1041 return drv->do_request(drive, rq, block);
1045 ide_kill_rq(drive, rq);
1175 struct request *rq;
1191 hwgroup->rq = NULL;
1251 rq = elv_next_request(drive->queue);
1252 if (!rq) {
1273 if (drive->blocked && !blk_pm_request(rq) && !(rq->cmd_flags & REQ_PREEMPT)) {
1282 hwgroup->rq = rq;
1297 startstop = start_request(drive, rq);
1324 struct request *rq;
1354 rq = HWGROUP(drive)->rq;
1356 if (!rq)
1359 HWGROUP(drive)->rq = NULL;
1361 rq->errors = 0;
1363 if (!rq->bio)
1366 rq->sector = rq->bio->bi_sector;
1367 rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
1368 rq->hard_cur_sectors = rq->current_nr_sectors;
1369 rq->buffer = bio_data(rq->bio);
1671 * @rq: request object
1679 void ide_init_drive_cmd (struct request *rq)
1681 memset(rq, 0, sizeof(*rq));
1682 rq->cmd_type = REQ_TYPE_ATA_CMD;
1683 rq->ref_count = 1;
1691 * @rq: request to issue
1697 * If action is ide_wait, then the rq is queued at the end of the
1701 * If action is ide_preempt, then the rq is queued at the head of
1704 * for the new rq to be completed. This is VERY DANGEROUS, and is
1707 * If action is ide_end, then the rq is queued at the end of the
1709 * for the new rq to be completed. This is again intended for careful
1713 int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
1721 rq->errors = 0;
1728 rq->ref_count++;
1729 rq->end_io_data = &wait;
1730 rq->end_io = blk_end_sync_rq;
1735 hwgroup->rq = NULL;
1738 rq->cmd_flags |= REQ_PREEMPT;
1740 __elv_add_request(drive->queue, rq, where, 0);
1747 if (rq->errors)
1750 blk_put_request(rq);
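
The comment block at lines 1697-1711 documents the three queuing modes for ide_do_drive_cmd(). As a usage illustration of the ide_wait path, here is a minimal caller sketch modeled on how the 2.6 IDE ioctl helpers issue REQ_TYPE_ATA_CMD requests; the helper name and the exact args[] layout are illustrative assumptions, though the buffer-as-u8-args convention matches the decoding shown at lines 376-385.

/* issue_cmd_and_wait - hypothetical helper: queue a drive command at
 * the tail of the request queue and sleep until it completes */
static int issue_cmd_and_wait(ide_drive_t *drive, u8 cmd, u8 nsect, u8 feature)
{
	struct request rq;
	u8 args[4] = { cmd, nsect, feature, 0 };	/* assumed layout */

	ide_init_drive_cmd(&rq);	/* zeroes rq, sets REQ_TYPE_ATA_CMD */
	rq.buffer = args;		/* decoded as u8 *args on completion */

	/* ide_wait: enqueue at the tail, sleep, report rq.errors result */
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}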