Search scope: /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/scsi/qla2xxx/

Lines matching refs:ha. Every excerpted line below references the HBA context pointer ha; the leading number on each line is its position in the original driver source file.

17 static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
18 static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
87 * @ha: HA context
92 qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
97 ha->req_ring_index++;
98 if (ha->req_ring_index == ha->request_q_length) {
99 ha->req_ring_index = 0;
100 ha->request_ring_ptr = ha->request_ring;
102 ha->request_ring_ptr++;
105 cont_pkt = (cont_entry_t *)ha->request_ring_ptr;
116 * @ha: HA context
121 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
126 ha->req_ring_index++;
127 if (ha->req_ring_index == ha->request_q_length) {
128 ha->req_ring_index = 0;
129 ha->request_ring_ptr = ha->request_ring;
131 ha->request_ring_ptr++;
134 cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;
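
Both continuation-IOCB helpers above (file lines 92-105 and 121-134) advance the request ring the same way: bump the producer index, wrap it back to zero when it reaches the ring length, reset the ring pointer to the base of the ring on wrap, and hand back the freshly claimed slot. Below is a minimal, self-contained sketch of that pattern; the struct layout, field names, and 64-byte slot size are simplified stand-ins rather than the driver's real scsi_qla_host_t fields.

#include <stdint.h>
#include <stdio.h>

struct ring_entry {
	uint8_t bytes[64];              /* one IOCB slot; 64 bytes assumed */
};

struct ring_state {
	struct ring_entry *ring;        /* base of the request ring */
	struct ring_entry *ring_ptr;    /* slot the next IOCB will occupy */
	uint16_t index;                 /* producer index */
	uint16_t length;                /* number of slots in the ring */
};

/* Advance to the next request-ring slot, wrapping to the base of the ring
 * when the end is reached, and return the slot for the caller to fill. */
static struct ring_entry *ring_next_slot(struct ring_state *rs)
{
	rs->index++;
	if (rs->index == rs->length) {
		rs->index = 0;
		rs->ring_ptr = rs->ring;
	} else {
		rs->ring_ptr++;
	}
	return rs->ring_ptr;
}

int main(void)
{
	struct ring_entry slots[4];
	struct ring_state rs = { slots, slots, 0, 4 };

	/* Six advances on a four-slot ring demonstrate the wrap at index 4. */
	for (int i = 0; i < 6; i++) {
		ring_next_slot(&rs);
		printf("index=%u slot=%td\n", rs.index, rs.ring_ptr - slots);
	}
	return 0;
}
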
156 scsi_qla_host_t *ha;
171 ha = sp->ha;
195 cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
225 scsi_qla_host_t *ha;
240 ha = sp->ha;
265 cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
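
The IOCB builders excerpted at file lines 156-195 and 225-265 place the first few data segment descriptors directly in the command packet and only reach for qla2x00_prep_cont_type0_iocb() or qla2x00_prep_cont_type1_iocb() when the scatter-gather list overflows it. The sketch below shows the entry-count arithmetic behind ha->isp_ops.calc_req_entries() at line 359; the capacities used (3 DSDs in a command entry, 7 per continuation entry) mirror the 32-bit IOCB formats but are best read as illustrative assumptions, not the driver's definitive constants.

#include <stdint.h>
#include <stdio.h>

#define DSDS_PER_CMD   3        /* assumed capacity of the command IOCB    */
#define DSDS_PER_CONT  7        /* assumed capacity of a continuation IOCB */

/* How many request-ring entries a command with tot_dsds data segments needs:
 * one command entry plus enough continuation entries for the overflow. */
static uint16_t calc_req_entries(uint16_t tot_dsds)
{
	uint16_t entries = 1;                   /* the command IOCB itself */

	if (tot_dsds > DSDS_PER_CMD) {
		uint16_t spill = tot_dsds - DSDS_PER_CMD;

		entries += (spill + DSDS_PER_CONT - 1) / DSDS_PER_CONT;
	}
	return entries;
}

int main(void)
{
	/* 12 scatter-gather segments: 3 in the command entry, the other 9
	 * spread over two continuation entries, so 3 ring slots in total. */
	printf("entries for 12 DSDs: %u\n", calc_req_entries(12));
	return 0;
}
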
296 scsi_qla_host_t *ha;
310 ha = sp->ha;
311 reg = &ha->iobase->isp;
317 if (ha->marker_needed != 0) {
318 if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
321 ha->marker_needed = 0;
325 spin_lock_irqsave(&ha->hardware_lock, flags);
328 handle = ha->current_outstanding_cmd;
333 if (ha->outstanding_cmds[handle] == 0)
342 tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
349 req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
359 req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
360 if (ha->req_q_cnt < (req_cnt + 2)) {
361 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
362 if (ha->req_ring_index < cnt)
363 ha->req_q_cnt = cnt - ha->req_ring_index;
365 ha->req_q_cnt = ha->request_q_length -
366 (ha->req_ring_index - cnt);
368 if (ha->req_q_cnt < (req_cnt + 2))
372 ha->current_outstanding_cmd = handle;
373 ha->outstanding_cmds[handle] = sp;
374 sp->ha = ha;
376 ha->req_q_cnt -= req_cnt;
378 cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
386 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
397 ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);
404 ha->req_ring_index++;
405 if (ha->req_ring_index == ha->request_q_length) {
406 ha->req_ring_index = 0;
407 ha->request_ring_ptr = ha->request_ring;
409 ha->request_ring_ptr++;
414 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
415 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
418 if (ha->flags.process_response_queue &&
419 ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
420 qla2x00_process_response_queue(ha);
422 spin_unlock_irqrestore(&ha->hardware_lock, flags);
428 pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
431 pci_unmap_single(ha->pdev, sp->dma_handle,
434 spin_unlock_irqrestore(&ha->hardware_lock, flags);
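
Before committing a command, qla2x00_start_scsi() checks at file lines 360-368 whether the ring still has room: only when the cached free count looks too small does it re-read the firmware's consumer index from ISP_REQ_Q_OUT and recompute the free space, and it always keeps two slots in reserve so a full ring can never be mistaken for an empty one. A standalone sketch of that arithmetic, with simplified names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Free slots between the driver's producer index and the firmware's consumer
 * index on a ring of ring_length entries (mirrors file lines 362-366). */
static uint16_t free_ring_slots(uint16_t producer_idx, uint16_t consumer_idx,
				uint16_t ring_length)
{
	if (producer_idx < consumer_idx)
		return consumer_idx - producer_idx;
	return ring_length - (producer_idx - consumer_idx);
}

/* Queue the command only if req_cnt entries fit with two slots to spare. */
static bool ring_has_room(uint16_t producer_idx, uint16_t consumer_idx,
			  uint16_t ring_length, uint16_t req_cnt)
{
	return free_ring_slots(producer_idx, consumer_idx, ring_length) >=
	       (uint16_t)(req_cnt + 2);
}

int main(void)
{
	/* 128-entry ring, producer at 120, consumer at 5: 13 slots free. */
	printf("free=%u, room for 3 entries: %d\n",
	       free_ring_slots(120, 5, 128),
	       ring_has_room(120, 5, 128, 3));
	return 0;
}
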
441 * @ha: HA context
451 __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
458 mrk = (mrk_entry_t *)qla2x00_req_pkt(ha);
461 __func__, ha->host_no));
469 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
476 SET_TARGET_ID(ha, mrk->target, loop_id);
482 qla2x00_isp_cmd(ha);
488 qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
494 spin_lock_irqsave(&ha->hardware_lock, flags);
495 ret = __qla2x00_marker(ha, loop_id, lun, type);
496 spin_unlock_irqrestore(&ha->hardware_lock, flags);
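
qla2x00_marker() (file lines 488-496) is just a locking wrapper: it takes hardware_lock with spin_lock_irqsave() and delegates to the unlocked __qla2x00_marker() worker, which code paths that already hold the lock can call directly. Here is a userspace sketch of that double-underscore convention, using a pthread mutex as a stand-in for the driver's spinlock; every name below is illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hardware_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unlocked worker: the caller must already hold hardware_lock. */
static int __send_marker(unsigned int loop_id, unsigned int lun)
{
	/* A real driver would build and queue the marker IOCB here. */
	(void)loop_id;
	(void)lun;
	return 0;
}

/* Public entry point: acquires the lock, then delegates to the worker. */
static int send_marker(unsigned int loop_id, unsigned int lun)
{
	int ret;

	pthread_mutex_lock(&hardware_lock);
	ret = __send_marker(loop_id, lun);
	pthread_mutex_unlock(&hardware_lock);
	return ret;
}

int main(void)
{
	printf("marker status: %d\n", send_marker(0, 0));
	return 0;
}
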
503 * @ha: HA context
510 qla2x00_req_pkt(scsi_qla_host_t *ha)
512 device_reg_t __iomem *reg = ha->iobase;
521 if ((req_cnt + 2) >= ha->req_q_cnt) {
523 if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
528 ISP_REQ_Q_OUT(ha, &reg->isp));
529 if (ha->req_ring_index < cnt)
530 ha->req_q_cnt = cnt - ha->req_ring_index;
532 ha->req_q_cnt = ha->request_q_length -
533 (ha->req_ring_index - cnt);
536 if ((req_cnt + 2) < ha->req_q_cnt) {
537 ha->req_q_cnt--;
538 pkt = ha->request_ring_ptr;
546 pkt->sys_define = (uint8_t)ha->req_ring_index;
555 spin_unlock(&ha->hardware_lock);
561 if (!ha->marker_needed)
562 qla2x00_poll(ha);
564 spin_lock_irq(&ha->hardware_lock);
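
qla2x00_req_pkt() (file lines 510-564) may have to wait for ring space: when fewer than req_cnt + 2 slots are free it releases hardware_lock, lets completions drain (calling qla2x00_poll() at line 562 when no marker is pending), then retakes the lock and re-reads the consumer index before trying again. The bounded retry loop below sketches that idea in a self-contained form; the helper names, the stubbed register read, and the iteration limit are assumptions made only for this example.

#include <stdint.h>
#include <stdio.h>

struct req_ring {
	uint16_t producer_idx;
	uint16_t ring_length;
	uint16_t free_slots;            /* cached count, refreshed below */
};

/* Stand-ins for the ISP_REQ_Q_OUT register read and the response-queue poll;
 * a real driver would touch hardware in both of these. */
static uint16_t read_consumer_idx(void) { return 8; }
static void service_response_queue(void) { }

/* Try to reserve one request-ring slot, refreshing the free count from the
 * consumer index on every pass; returns the claimed slot index or -1. */
static int reserve_one_slot(struct req_ring *rq)
{
	for (unsigned int attempt = 0; attempt < 1000; attempt++) {
		uint16_t out = read_consumer_idx();

		rq->free_slots = (rq->producer_idx < out) ?
			(uint16_t)(out - rq->producer_idx) :
			(uint16_t)(rq->ring_length - (rq->producer_idx - out));

		/* One entry wanted plus the 2-slot reserve (see file line 536). */
		if (rq->free_slots > 3) {
			rq->free_slots--;
			return rq->producer_idx;
		}

		/* No room yet: let queued completions drain, then look again. */
		service_response_queue();
	}
	return -1;                      /* gave up waiting for ring space */
}

int main(void)
{
	struct req_ring rq = { 0, 128, 0 };

	printf("reserved slot %d\n", reserve_one_slot(&rq));
	return 0;
}
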
575 * @ha: HA context
580 qla2x00_isp_cmd(scsi_qla_host_t *ha)
582 device_reg_t __iomem *reg = ha->iobase;
586 (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));
589 ha->req_ring_index++;
590 if (ha->req_ring_index == ha->request_q_length) {
591 ha->req_ring_index = 0;
592 ha->request_ring_ptr = ha->request_ring;
594 ha->request_ring_ptr++;
597 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
598 WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
601 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
602 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
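
qla2x00_isp_cmd() (file lines 580-602) hands the new entry to the firmware by advancing the producer index and writing it to the request-queue-in register; the RD_REG_WORD_RELAXED() immediately after the write reads the register back so the update is flushed out of any PCI posting buffers instead of lingering there. A compact illustration of that write-then-read-back idiom, with a volatile pointer standing in for the real ioremap()ed register (illustrative only, not working MMIO code):

#include <stdint.h>

/* Write the new producer index to the request-queue-in doorbell and read it
 * straight back so the posted write is pushed through to the device. */
static void ring_request_doorbell(volatile uint16_t *req_q_in_reg,
				  uint16_t producer_idx)
{
	*req_q_in_reg = producer_idx;   /* WRT_REG_WORD(ISP_REQ_Q_IN(...), ...) */
	(void)*req_q_in_reg;            /* RD_REG_WORD_RELAXED(): PCI posting   */
}

int main(void)
{
	/* Plain memory stands in for the device register in this sketch. */
	volatile uint16_t fake_reg = 0;

	ring_request_doorbell(&fake_reg, 42);
	return fake_reg == 42 ? 0 : 1;
}

On the ISP24xx parts the same step is a single 32-bit write to reg->isp24.req_q_in (file lines 597-598 and 844).
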
643 scsi_qla_host_t *ha;
658 ha = sp->ha;
689 cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
721 scsi_qla_host_t *ha;
735 ha = sp->ha;
736 reg = &ha->iobase->isp24;
742 if (ha->marker_needed != 0) {
743 if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
746 ha->marker_needed = 0;
750 spin_lock_irqsave(&ha->hardware_lock, flags);
753 handle = ha->current_outstanding_cmd;
758 if (ha->outstanding_cmds[handle] == 0)
767 tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
774 req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
784 if (ha->req_q_cnt < (req_cnt + 2)) {
786 if (ha->req_ring_index < cnt)
787 ha->req_q_cnt = cnt - ha->req_ring_index;
789 ha->req_q_cnt = ha->request_q_length -
790 (ha->req_ring_index - cnt);
792 if (ha->req_q_cnt < (req_cnt + 2))
796 ha->current_outstanding_cmd = handle;
797 ha->outstanding_cmds[handle] = sp;
798 sp->ha = ha;
800 ha->req_q_cnt -= req_cnt;
802 cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
834 ha->req_ring_index++;
835 if (ha->req_ring_index == ha->request_q_length) {
836 ha->req_ring_index = 0;
837 ha->request_ring_ptr = ha->request_ring;
839 ha->request_ring_ptr++;
844 WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
848 if (ha->flags.process_response_queue &&
849 ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
850 qla24xx_process_response_queue(ha);
852 spin_unlock_irqrestore(&ha->hardware_lock, flags);
858 pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
861 pci_unmap_single(ha->pdev, sp->dma_handle,
864 spin_unlock_irqrestore(&ha->hardware_lock, flags);
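
Both submission paths, qla2x00_start_scsi() at file lines 328-333 and qla24xx_start_scsi() at lines 753-758, pick a command handle the same way: they scan the outstanding_cmds[] table circularly, starting just after the handle they used last time, and claim the first free slot, treating an exhausted table as a full queue. The sketch below is a simplified, self-contained version of that search; the table size, the srb type, and the convention that handle 0 stays unused are assumptions made for the example.

#include <stdint.h>
#include <stdio.h>

#define MAX_OUTSTANDING_COMMANDS 1024   /* assumed table size */

struct srb {
	int tag;                        /* placeholder for per-command state */
};

struct host {
	uint32_t current_outstanding_cmd;
	struct srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
};

/* Circularly scan for a free handle starting after the last one used; record
 * the command under it and return it, or return 0 if the table is full. */
static uint32_t claim_handle(struct host *ha, struct srb *sp)
{
	uint32_t handle = ha->current_outstanding_cmd;

	for (uint32_t index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (ha->outstanding_cmds[handle] == NULL) {
			ha->current_outstanding_cmd = handle;
			ha->outstanding_cmds[handle] = sp;
			return handle;
		}
	}
	return 0;       /* no free handle: the caller treats this as queue full */
}

int main(void)
{
	static struct host ha;          /* zero-initialized: every slot free */
	struct srb cmd = { 0 };

	printf("claimed handle %u\n", claim_handle(&ha, &cmd));
	return 0;
}
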