/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/ata/

Lines Matching refs:qc

1328 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1330 struct completion *waiting = qc->private_data;
1363 struct ata_queued_cmd *qc;
1379 /* initialize internal qc */
1388 qc = __ata_qc_from_tag(ap, tag);
1390 qc->tag = tag;
1391 qc->scsicmd = NULL;
1392 qc->ap = ap;
1393 qc->dev = dev;
1394 ata_qc_reinit(qc);
1403 /* prepare & issue qc */
1404 qc->tf = *tf;
1406 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1407 qc->flags |= ATA_QCFLAG_RESULT_TF;
1408 qc->dma_dir = dma_dir;
1415 ata_sg_init(qc, sg, n_elem);
1416 qc->nbytes = buflen;
1419 qc->private_data = &wait;
1420 qc->complete_fn = ata_qc_complete_internal;
1422 ata_qc_issue(qc);
1434 * following test prevents us from completing the qc
1438 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1439 qc->err_mask |= AC_ERR_TIMEOUT;
1444 ata_qc_complete(qc);
1448 "qc timeout (cmd 0x%x)\n", command);
1456 ap->ops->post_internal_cmd(qc);
1459 if (qc->flags & ATA_QCFLAG_FAILED) {
1460 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1461 qc->err_mask |= AC_ERR_DEV;
1463 if (!qc->err_mask)
1464 qc->err_mask |= AC_ERR_OTHER;
1466 if (qc->err_mask & ~AC_ERR_OTHER)
1467 qc->err_mask &= ~AC_ERR_OTHER;
1473 *tf = qc->result_tf;
1474 err_mask = qc->err_mask;
1476 ata_qc_free(qc);
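
A minimal sketch of the synchronous issue-and-wait pattern these fragments come from (ata_exec_internal_sg() in libata-core.c): qc->private_data points at an on-stack completion and ata_qc_complete_internal() signals it. The helper name and the 30-second timeout below are illustrative, and the locking and timeout handling are simplified compared to the real function.

    /* Sketch only, as if inside libata-core.c where the internal helpers
     * (ata_qc_complete_internal, ata_qc_issue) are visible. */
    static void example_issue_and_wait(struct ata_port *ap,
                                       struct ata_queued_cmd *qc)
    {
            DECLARE_COMPLETION_ONSTACK(wait);
            unsigned long flags;

            qc->private_data = &wait;                 /* waited on below */
            qc->complete_fn = ata_qc_complete_internal;

            spin_lock_irqsave(ap->lock, flags);
            ata_qc_issue(qc);                         /* issue under the port lock */
            spin_unlock_irqrestore(ap->lock, flags);

            if (!wait_for_completion_timeout(&wait, 30 * HZ))
                    qc->err_mask |= AC_ERR_TIMEOUT;   /* simplified timeout path */
    }
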
3970 * @qc: Command containing DMA memory to be released
3977 void ata_sg_clean(struct ata_queued_cmd *qc)
3979 struct ata_port *ap = qc->ap;
3980 struct scatterlist *sg = qc->__sg;
3981 int dir = qc->dma_dir;
3984 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3987 if (qc->flags & ATA_QCFLAG_SINGLE)
3988 WARN_ON(qc->n_elem > 1);
3990 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3996 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3997 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3999 if (qc->flags & ATA_QCFLAG_SG) {
4000 if (qc->n_elem)
4001 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4003 sg[qc->orig_n_elem - 1].length += qc->pad_len;
4005 struct scatterlist *psg = &qc->pad_sgent;
4007 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4011 if (qc->n_elem)
4016 sg->length += qc->pad_len;
4018 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4019 pad_buf, qc->pad_len);
4022 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4023 qc->__sg = NULL;
4028 * @qc: Metadata associated with taskfile to be transferred
4037 static void ata_fill_sg(struct ata_queued_cmd *qc)
4039 struct ata_port *ap = qc->ap;
4043 WARN_ON(qc->__sg == NULL);
4044 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4047 ata_for_each_sg(sg, qc) {
4080 * @qc: Metadata associated with taskfile to check
4092 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4094 struct ata_port *ap = qc->ap;
4099 if (unlikely(qc->nbytes & 15))
4103 return ap->ops->check_atapi_dma(qc);
4110 * @qc: Metadata associated with taskfile to be prepared
4117 void ata_qc_prep(struct ata_queued_cmd *qc)
4119 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4122 ata_fill_sg(qc);
4125 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
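
ata_qc_prep() fills the controller's PRD table via ata_fill_sg() when the qc has been DMA-mapped, while ata_noop_qc_prep() is for hardware that needs no such preparation. A driver selects between them through its ata_port_operations; a hedged sketch (the ops name is hypothetical, and real drivers supply many more callbacks):

    static const struct ata_port_operations example_port_ops = {
            .qc_prep  = ata_qc_prep,        /* build the PRD from the S/G list */
            .qc_issue = ata_qc_issue_prot,  /* generic taskfile/BMDMA issue path */
            /* ... remaining mandatory callbacks omitted ... */
    };
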
4129 * @qc: Command to be associated
4133 * Initialize the data-related elements of queued_cmd @qc
4140 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4142 qc->flags |= ATA_QCFLAG_SINGLE;
4144 qc->__sg = &qc->sgent;
4145 qc->n_elem = 1;
4146 qc->orig_n_elem = 1;
4147 qc->buf_virt = buf;
4148 qc->nbytes = buflen;
4150 sg_init_one(&qc->sgent, buf, buflen);
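
ata_sg_init_one() attaches a single kernel buffer to a qc by pointing qc->__sg at the embedded qc->sgent and describing the buffer with sg_init_one(). A hedged usage sketch (the function name, buffer, and direction are illustrative):

    static void example_map_single(struct ata_queued_cmd *qc,
                                   void *buf, unsigned int buflen)
    {
            ata_sg_init_one(qc, buf, buflen);  /* sets ATA_QCFLAG_SINGLE, nbytes, sgent */
            qc->dma_dir = DMA_FROM_DEVICE;     /* e.g. a device-to-host transfer */
    }
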
4155 * @qc: Command to be associated
4159 * Initialize the data-related elements of queued_cmd @qc
4167 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4170 qc->flags |= ATA_QCFLAG_SG;
4171 qc->__sg = sg;
4172 qc->n_elem = n_elem;
4173 qc->orig_n_elem = n_elem;
4178 * @qc: Command with memory buffer to be mapped.
4180 * DMA-map the memory buffer associated with queued_cmd @qc.
4189 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4191 struct ata_port *ap = qc->ap;
4192 int dir = qc->dma_dir;
4193 struct scatterlist *sg = qc->__sg;
4198 qc->pad_len = sg->length & 3;
4199 if (qc->pad_len) {
4200 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4201 struct scatterlist *psg = &qc->pad_sgent;
4203 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4207 if (qc->tf.flags & ATA_TFLAG_WRITE)
4208 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4209 qc->pad_len);
4211 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4214 sg->length -= qc->pad_len;
4219 sg->length, qc->pad_len);
4223 qc->n_elem--;
4227 dma_address = dma_map_single(ap->dev, qc->buf_virt,
4231 sg->length += qc->pad_len;
4240 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4247 * @qc: Command with scatter-gather table to be mapped.
4249 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4259 static int ata_sg_setup(struct ata_queued_cmd *qc)
4261 struct ata_port *ap = qc->ap;
4262 struct scatterlist *sg = qc->__sg;
4263 struct scatterlist *lsg = &sg[qc->n_elem - 1];
4267 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4270 qc->pad_len = lsg->length & 3;
4271 if (qc->pad_len) {
4272 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4273 struct scatterlist *psg = &qc->pad_sgent;
4276 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4284 offset = lsg->offset + lsg->length - qc->pad_len;
4288 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4290 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4294 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4297 lsg->length -= qc->pad_len;
4302 qc->n_elem - 1, lsg->length, qc->pad_len);
4305 pre_n_elem = qc->n_elem;
4314 dir = qc->dma_dir;
4318 lsg->length += qc->pad_len;
4325 qc->n_elem = n_elem;
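
Both ata_sg_setup_one() and ata_sg_setup() trim the last scatterlist element to a 4-byte boundary and bounce the remaining bytes through the per-tag pad buffer at ap->pad + qc->tag * ATA_DMA_PAD_SZ. A small sketch of the alignment arithmetic behind qc->pad_len (the helper name is hypothetical):

    static inline unsigned int example_pad_len(unsigned int sg_len)
    {
            return sg_len & (ATA_DMA_PAD_SZ - 1);   /* ATA_DMA_PAD_SZ is 4, so "& 3" */
    }
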
4416 * @qc: Command on going
4418 * Transfer qc->sect_size bytes of data from/to the ATA device.
4424 static void ata_pio_sector(struct ata_queued_cmd *qc)
4426 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4427 struct scatterlist *sg = qc->__sg;
4428 struct ata_port *ap = qc->ap;
4433 if (qc->curbytes == qc->nbytes - qc->sect_size)
4436 page = sg[qc->cursg].page;
4437 offset = sg[qc->cursg].offset + qc->cursg_ofs;
4443 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4452 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4458 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4461 qc->curbytes += qc->sect_size;
4462 qc->cursg_ofs += qc->sect_size;
4464 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
4465 qc->cursg++;
4466 qc->cursg_ofs = 0;
4472 * @qc: Command on going
4481 static void ata_pio_sectors(struct ata_queued_cmd *qc)
4483 if (is_multi_taskfile(&qc->tf)) {
4487 WARN_ON(qc->dev->multi_count == 0);
4489 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4490 qc->dev->multi_count);
4492 ata_pio_sector(qc);
4494 ata_pio_sector(qc);
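
For READ/WRITE MULTIPLE commands, ata_pio_sectors() transfers up to dev->multi_count sectors per data-request phase, bounded by what is still outstanding in the qc. A sketch of that count, mirroring the min() in the fragment above (helper name hypothetical):

    static unsigned int example_pio_nsect(const struct ata_queued_cmd *qc)
    {
            return min((qc->nbytes - qc->curbytes) / qc->sect_size,
                       qc->dev->multi_count);
    }
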
4500 * @qc: Taskfile currently active
4509 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4513 WARN_ON(qc->dev->cdb_len < 12);
4515 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4518 switch (qc->tf.protocol) {
4528 ap->ops->bmdma_start(qc);
4535 * @qc: Command on going
4545 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4547 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4548 struct scatterlist *sg = qc->__sg;
4549 struct ata_port *ap = qc->ap;
4554 if (qc->curbytes + bytes >= qc->nbytes)
4558 if (unlikely(qc->cursg >= qc->n_elem)) {
4560 * The end of qc->sg is reached and the device expects
4561 * more data to transfer. In order not to overrun qc->sg
4571 ata_dev_printk(qc->dev, KERN_WARNING,
4575 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4581 sg = &qc->__sg[qc->cursg];
4584 offset = sg->offset + qc->cursg_ofs;
4591 count = min(sg->length - qc->cursg_ofs, bytes);
4596 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4605 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4611 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4615 qc->curbytes += count;
4616 qc->cursg_ofs += count;
4618 if (qc->cursg_ofs == sg->length) {
4619 qc->cursg++;
4620 qc->cursg_ofs = 0;
4629 * @qc: Command on going
4637 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4639 struct ata_port *ap = qc->ap;
4640 struct ata_device *dev = qc->dev;
4642 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4644 /* Abuse qc->result_tf for temp storage of intermediate TF
4646 * For normal completion, qc->result_tf is not relevant. For
4647 * error, qc->result_tf is later overwritten by ata_qc_complete().
4648 * So, the correctness of qc->result_tf is not affected.
4650 ap->ops->tf_read(ap, &qc->result_tf);
4651 ireason = qc->result_tf.nsect;
4652 bc_lo = qc->result_tf.lbam;
4653 bc_hi = qc->result_tf.lbah;
4667 __atapi_pio_bytes(qc, bytes);
4673 qc->err_mask |= AC_ERR_HSM;
4678 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4680 * @qc: qc on going
4686 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4688 if (qc->tf.flags & ATA_TFLAG_POLLING)
4692 if (qc->tf.protocol == ATA_PROT_PIO &&
4693 (qc->tf.flags & ATA_TFLAG_WRITE))
4696 if (is_atapi_taskfile(&qc->tf) &&
4697 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4705 * ata_hsm_qc_complete - finish a qc running on standard HSM
4706 * @qc: Command to complete
4709 * Finish @qc which is running on standard HSM.
4715 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4717 struct ata_port *ap = qc->ap;
4727 qc = ata_qc_from_tag(ap, qc->tag);
4728 if (qc) {
4729 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4731 ata_qc_complete(qc);
4738 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4739 ata_qc_complete(qc);
4747 ata_qc_complete(qc);
4750 ata_qc_complete(qc);
4757 * @qc: qc on going
4764 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4770 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4774 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4776 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4780 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
4790 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4797 qc->err_mask |= AC_ERR_DEV;
4800 qc->err_mask |= AC_ERR_HSM;
4815 qc->err_mask |= AC_ERR_HSM;
4828 if (qc->tf.protocol == ATA_PROT_PIO) {
4838 ata_pio_sectors(qc);
4842 atapi_send_cdb(ap, qc);
4854 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4874 qc->err_mask |= AC_ERR_HSM;
4879 atapi_pio_bytes(qc);
4891 qc->err_mask |= AC_ERR_DEV;
4897 qc->err_mask |= AC_ERR_HSM |
4916 qc->err_mask |= AC_ERR_DEV;
4918 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4919 ata_pio_sectors(qc);
4925 qc->err_mask |= AC_ERR_HSM;
4935 ata_pio_sectors(qc);
4938 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4952 qc->err_mask |= __ac_err_mask(status);
4959 ap->print_id, qc->dev->devno, status);
4961 WARN_ON(qc->err_mask);
4966 ata_hsm_qc_complete(qc, in_wq);
4972 /* make sure qc->err_mask is available to
4975 WARN_ON(qc->err_mask == 0);
4980 ata_hsm_qc_complete(qc, in_wq);
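
ata_hsm_move() is the single entry point that advances the host state machine, whether called from the interrupt handler (in_wq = 0) or from the polling work in ata_pio_task() (in_wq = 1). A hedged sketch of the interrupt-side call, mirroring ata_host_intr() further down (helper name hypothetical):

    static void example_irq_step(struct ata_port *ap, struct ata_queued_cmd *qc)
    {
            u8 status = ata_chk_status(ap);   /* read Status, which also clears INTRQ */

            ata_hsm_move(ap, qc, status, 0);  /* 0: not running in the workqueue */
    }
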
4996 struct ata_queued_cmd *qc = ap->port_task_data;
5015 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5021 poll_next = ata_hsm_move(ap, qc, status, 1);
5041 struct ata_queued_cmd *qc = NULL;
5051 qc = __ata_qc_from_tag(ap, i);
5055 if (qc)
5056 qc->tag = i;
5058 return qc;
5072 struct ata_queued_cmd *qc;
5074 qc = ata_qc_new(ap);
5075 if (qc) {
5076 qc->scsicmd = NULL;
5077 qc->ap = ap;
5078 qc->dev = dev;
5080 ata_qc_reinit(qc);
5083 return qc;
5088 * @qc: Command to complete
5096 void ata_qc_free(struct ata_queued_cmd *qc)
5098 struct ata_port *ap = qc->ap;
5101 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5103 qc->flags = 0;
5104 tag = qc->tag;
5106 qc->tag = ATA_TAG_POISON;
5111 void __ata_qc_complete(struct ata_queued_cmd *qc)
5113 struct ata_port *ap = qc->ap;
5115 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5116 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5118 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5119 ata_sg_clean(qc);
5121 /* command should be marked inactive atomically with qc completion */
5122 if (qc->tf.protocol == ATA_PROT_NCQ)
5123 ap->sactive &= ~(1 << qc->tag);
5127 /* atapi: mark qc as inactive to prevent the interrupt handler
5131 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5132 ap->qc_active &= ~(1 << qc->tag);
5135 qc->complete_fn(qc);
5138 static void fill_result_tf(struct ata_queued_cmd *qc)
5140 struct ata_port *ap = qc->ap;
5142 qc->result_tf.flags = qc->tf.flags;
5143 ap->ops->tf_read(ap, &qc->result_tf);
5148 * @qc: Command to complete
5157 void ata_qc_complete(struct ata_queued_cmd *qc)
5159 struct ata_port *ap = qc->ap;
5164 if (unlikely(qc->err_mask))
5165 qc->flags |= ATA_QCFLAG_FAILED;
5167 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5168 if (!ata_tag_internal(qc->tag)) {
5169 /* always fill result TF for failed qc */
5170 fill_result_tf(qc);
5171 ata_qc_schedule_eh(qc);
5177 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5178 fill_result_tf(qc);
5180 __ata_qc_complete(qc);
5182 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5186 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5187 fill_result_tf(qc);
5189 __ata_qc_complete(qc);
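
ata_qc_complete() snapshots the result taskfile when needed, finishes the command through __ata_qc_complete(), and schedules EH instead for failed qcs. From a driver's interrupt path the usual pattern is to translate the ATA status into qc->err_mask first; a hedged sketch (helper name hypothetical):

    static void example_finish_qc(struct ata_queued_cmd *qc, u8 status)
    {
            qc->err_mask |= ac_err_mask(status);  /* AC_ERR_DEV on ERR/DF, AC_ERR_HSM on BSY/DRQ */
            ata_qc_complete(qc);                  /* fills result_tf, runs complete_fn or EH */
    }
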
5197 * @finish_qc: LLDD callback invoked before completing a qc
5226 struct ata_queued_cmd *qc;
5231 if ((qc = ata_qc_from_tag(ap, i))) {
5233 finish_qc(qc);
5234 ata_qc_complete(qc);
5242 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5244 struct ata_port *ap = qc->ap;
5246 switch (qc->tf.protocol) {
5268 * @qc: command to issue to device
5278 void ata_qc_issue(struct ata_queued_cmd *qc)
5280 struct ata_port *ap = qc->ap;
5283 * check is skipped for old EH because it reuses active qc to
5288 if (qc->tf.protocol == ATA_PROT_NCQ) {
5289 WARN_ON(ap->sactive & (1 << qc->tag));
5290 ap->sactive |= 1 << qc->tag;
5293 ap->active_tag = qc->tag;
5296 qc->flags |= ATA_QCFLAG_ACTIVE;
5297 ap->qc_active |= 1 << qc->tag;
5299 if (ata_should_dma_map(qc)) {
5300 if (qc->flags & ATA_QCFLAG_SG) {
5301 if (ata_sg_setup(qc))
5303 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5304 if (ata_sg_setup_one(qc))
5308 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5311 ap->ops->qc_prep(qc);
5313 qc->err_mask |= ap->ops->qc_issue(qc);
5314 if (unlikely(qc->err_mask))
5319 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5320 qc->err_mask |= AC_ERR_SYSTEM;
5322 ata_qc_complete(qc);
5327 * @qc: command to issue to device
5343 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5345 struct ata_port *ap = qc->ap;
5351 switch (qc->tf.protocol) {
5356 qc->tf.flags |= ATA_TFLAG_POLLING;
5359 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5369 ata_dev_select(ap, qc->dev->devno, 1, 0);
5372 switch (qc->tf.protocol) {
5374 if (qc->tf.flags & ATA_TFLAG_POLLING)
5375 ata_qc_set_polling(qc);
5377 ata_tf_to_host(ap, &qc->tf);
5380 if (qc->tf.flags & ATA_TFLAG_POLLING)
5381 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5386 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5388 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5389 ap->ops->bmdma_setup(qc); /* set up bmdma */
5390 ap->ops->bmdma_start(qc); /* initiate bmdma */
5395 if (qc->tf.flags & ATA_TFLAG_POLLING)
5396 ata_qc_set_polling(qc);
5398 ata_tf_to_host(ap, &qc->tf);
5400 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5403 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5412 if (qc->tf.flags & ATA_TFLAG_POLLING)
5413 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5424 if (qc->tf.flags & ATA_TFLAG_POLLING)
5425 ata_qc_set_polling(qc);
5427 ata_tf_to_host(ap, &qc->tf);
5432 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5433 (qc->tf.flags & ATA_TFLAG_POLLING))
5434 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5438 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5440 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5441 ap->ops->bmdma_setup(qc); /* set up bmdma */
5445 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5446 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5460 * @qc: Taskfile currently active in engine
5474 struct ata_queued_cmd *qc)
5480 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5491 * No need to check is_atapi_taskfile(&qc->tf) again.
5493 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5497 if (qc->tf.protocol == ATA_PROT_DMA ||
5498 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5509 ap->ops->bmdma_stop(qc);
5513 qc->err_mask |= AC_ERR_HOST_BUS;
5537 ata_hsm_move(ap, qc, status, 0);
5539 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5540 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5589 struct ata_queued_cmd *qc;
5591 qc = ata_qc_from_tag(ap, ap->active_tag);
5592 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5593 (qc->flags & ATA_QCFLAG_ACTIVE))
5594 handled |= ata_host_intr(ap, qc);
6697 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6704 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
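
The dummy callbacks let libata bind a port that should never carry I/O: ata_dummy_qc_noret() ignores its qc and ata_dummy_qc_issue() fails every command (in the full source it returns an error mask such as AC_ERR_SYSTEM). A hedged sketch of how such a table is assembled (names are illustrative; the real one is ata_dummy_port_ops in libata-core.c):

    static unsigned int example_dummy_qc_issue(struct ata_queued_cmd *qc)
    {
            return AC_ERR_SYSTEM;   /* reject the command immediately */
    }

    static const struct ata_port_operations example_dummy_ops = {
            .qc_prep  = ata_noop_qc_prep,
            .qc_issue = example_dummy_qc_issue,
    };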