Lines matching references to ha (struct qla_hw_data *) in the qla2xxx driver's IOCB and command-submission paths

337 struct qla_hw_data *ha;
343 ha = vha->hw;
344 reg = &ha->iobase->isp;
346 req = ha->req_q_map[0];
347 rsp = ha->rsp_q_map[0];
353 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
361 spin_lock_irqsave(&ha->hardware_lock, flags);
369 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
379 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
381 cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
407 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
416 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
433 wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
434 rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
441 spin_unlock_irqrestore(&ha->hardware_lock, flags);
448 spin_unlock_irqrestore(&ha->hardware_lock, flags);
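The references above (337-448) trace one command-start path: take the hardware lock (361), map the command's scatter/gather list (369), read the request-queue OUT index (381) to see how much ring space remains, build the command packet, then publish the new IN index and ring the doorbell (433-434). The snippet below is a standalone model of the free-slot arithmetic that reading the OUT index enables; the ring length and names are assumptions for illustration, not the driver's exact code.

#include <stdio.h>

#define REQ_RING_LEN 128u    /* assumed request-ring length */

/* Entries still free between the producer (IN) index the driver owns and
 * the consumer (OUT) index read back from the adapter. */
static unsigned int req_entries_free(unsigned int in, unsigned int out)
{
    if (in < out)                        /* producer has wrapped past the ring end */
        return out - in;
    return REQ_RING_LEN - (in - out);    /* producer ahead of consumer */
}

int main(void)
{
    printf("%u free\n", req_entries_free(4, 10));    /* wrapped producer: 6 */
    printf("%u free\n", req_entries_free(10, 4));    /* normal case: 122 */
    return 0;
}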
461 struct qla_hw_data *ha = vha->hw;
462 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
464 if (IS_P3P_TYPE(ha)) {
476 if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
478 } else if (IS_QLA83XX(ha)) {
480 rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
481 } else if (IS_QLAFX00(ha)) {
484 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
485 } else if (IS_FWI2_CAPABLE(ha)) {
489 wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
491 rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
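Both doorbell sites above (433-434 and 489-491) follow the register write with an immediate read of the same register, so the posted PCI write is flushed to the adapter before the caller proceeds. A kernel-style sketch of that idiom; the __iomem pointer is an assumption standing in for whatever ioremap()/pci_iomap() returned.

#include <linux/io.h>

static void ring_request_doorbell(void __iomem *req_q_in, u16 ring_index)
{
    writew(ring_index, req_q_in);    /* publish the new producer index */
    readw(req_q_in);                 /* read back to flush the posted write */
}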
515 struct qla_hw_data *ha = vha->hw;
516 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
531 if (IS_FWI2_CAPABLE(ha)) {
537 SET_TARGET_ID(ha, mrk->target, loop_id);
542 if (IS_FWI2_CAPABLE(ha))
835 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
891 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
949 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
988 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1029 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1078 ha->dif_bundle_crossed_pages++;
1089 ha->dif_bundle_writes++;
1091 ha->dif_bundle_reads++;
1134 ha->dif_bundle_kallocs++;
1138 (ha->dif_bundl_pool, GFP_ATOMIC,
1150 ha->dif_bundle_kallocs--;
1153 ha->dif_bundle_dma_allocs++;
1219 ha->dif_bundle_kallocs++;
1224 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1235 ha->dif_bundle_kallocs--;
1238 ha->dif_bundle_dma_allocs++;
1289 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
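The sglist-walk helpers above repeatedly draw descriptor-list chunks from a DMA pool with GFP_ATOMIC (lines 891, 988, 1224, 1289), since these paths run with the hardware lock held. A sketch of that pattern under assumed names and sizes: the pool is created once during probe, and the fast path then hands out fixed-size, DMA-addressable chunks without sleeping.

#include <linux/dmapool.h>
#include <linux/gfp.h>

#define DSD_CHUNK_SIZE 512    /* assumed size of one descriptor-list chunk */

static struct dma_pool *dsd_pool_create(struct device *dev)
{
    /* one pool per adapter, created in slow-path init code */
    return dma_pool_create("dsd_list", dev, DSD_CHUNK_SIZE, 8, 0);
}

static void *dsd_chunk_get(struct dma_pool *pool, dma_addr_t *dma)
{
    /* GFP_ATOMIC: caller holds a spinlock on the command-build path */
    return dma_pool_alloc(pool, GFP_ATOMIC, dma);
}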
1349 struct qla_hw_data *ha;
1361 ha = vha->hw;
1389 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1465 else if (IS_PI_UNINIT_CAPABLE(ha)) {
1509 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1512 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1520 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1553 struct qla_hw_data *ha = vha->hw;
1567 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1574 spin_lock_irqsave(&ha->hardware_lock, flags);
1582 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1599 if (IS_SHADOW_REG_CAPABLE(ha)) {
1675 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1683 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1710 struct qla_hw_data *ha = vha->hw;
1733 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1740 spin_lock_irqsave(&ha->hardware_lock, flags);
1749 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1779 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1806 if (IS_SHADOW_REG_CAPABLE(ha)) {
1879 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1891 spin_unlock_irqrestore(&ha->hardware_lock, flags);
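The DIF-capable start routine above maps both the data scatter/gather list (1749) and the protection scatter/gather list (1779) before emitting descriptors, the same dma_map_sg() step seen at 369 and 1582. Below is a sketch of that map-and-emit step; the dsd64_example layout is an illustrative stand-in, not the driver's structure.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>
#include <scsi/scsi_cmnd.h>

struct dsd64_example {    /* hypothetical 64-bit address/length descriptor */
    __le64 address;
    __le32 length;
} __packed;

static int map_and_emit_dsds(struct device *dev, struct scsi_cmnd *cmd,
                             struct dsd64_example *dsd)
{
    struct scatterlist *sg;
    int nseg, i;

    nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
                      cmd->sc_data_direction);
    if (unlikely(!nseg))
        return -ENOMEM;

    /* one descriptor per DMA-mapped segment */
    scsi_for_each_sg(cmd, sg, nseg, i) {
        dsd->address = cpu_to_le64(sg_dma_address(sg));
        dsd->length = cpu_to_le32(sg_dma_len(sg));
        dsd++;
    }
    return nseg;
}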
1917 struct qla_hw_data *ha = vha->hw;
1949 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1966 if (IS_SHADOW_REG_CAPABLE(ha)) {
2078 struct qla_hw_data *ha = vha->hw;
2131 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2161 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2188 if (IS_SHADOW_REG_CAPABLE(ha)) {
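IS_SHADOW_REG_CAPABLE() appears before each ring-space check above (1599, 1806, 1966, 2188, and again at 4179 and 4342). The usual purpose of such a capability bit is to let the driver read a firmware-maintained copy of the queue index from host memory instead of issuing an MMIO read; the sketch below is written under that reading, with illustrative pointer names.

#include <linux/compiler.h>
#include <linux/io.h>
#include <linux/types.h>

static u16 request_q_out(bool shadow_capable, const __le16 *shadow_out,
                         void __iomem *reg_out)
{
    if (shadow_capable)
        return le16_to_cpu(READ_ONCE(*shadow_out));    /* copy kept current in host RAM */
    return readw(reg_out);                             /* fall back to a register read */
}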
2285 struct qla_hw_data *ha = vha->hw;
2287 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2305 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2306 IS_QLA28XX(ha))
2308 else if (IS_P3P_TYPE(ha))
2310 else if (IS_FWI2_CAPABLE(ha))
2312 else if (IS_QLAFX00(ha))
2316 ISP_REQ_Q_OUT(ha, &reg->isp));
2350 if (IS_QLAFX00(ha)) {
2447 struct qla_hw_data *ha = sp->vha->hw;
2452 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2456 if (HAS_EXTENDED_IDS(ha)) {
2494 struct qla_hw_data *ha = sp->vha->hw;
2497 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2499 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2521 struct qla_hw_data *ha = sp->vha->hw;
2524 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2526 if (HAS_EXTENDED_IDS(ha)) {
2532 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2533 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2534 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2535 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
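Lines 2532-2535 spread the 64-bit async_pd_dma address across four 16-bit mailbox fields: mb2/mb3 carry the high and low halves of the low dword, mb6/mb7 the high and low halves of the high dword. A standalone model of the MSW/LSW/MSD arithmetic follows; the macros are redefined here for illustration and simply mirror what the names suggest.

#include <stdint.h>
#include <stdio.h>

#define LSD(x)  ((uint32_t)((uint64_t)(x) & 0xffffffffu))   /* low 32 bits */
#define MSD(x)  ((uint32_t)((uint64_t)(x) >> 32))           /* high 32 bits */
#define LSW(x)  ((uint16_t)((uint32_t)(x) & 0xffffu))       /* low 16 bits */
#define MSW(x)  ((uint16_t)((uint32_t)(x) >> 16))           /* high 16 bits */

int main(void)
{
    uint64_t pd_dma = 0x1122334455667788ULL;    /* example bus address */

    printf("mb2=0x%04x mb3=0x%04x mb6=0x%04x mb7=0x%04x\n",
           (unsigned)MSW(pd_dma), (unsigned)LSW(pd_dma),
           (unsigned)MSW(MSD(pd_dma)), (unsigned)LSW(MSD(pd_dma)));
    /* prints mb2=0x5566 mb3=0x7788 mb6=0x1122 mb7=0x3344 */
    return 0;
}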
2546 struct qla_hw_data *ha = vha->hw;
2557 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2708 struct qla_hw_data *ha = vha->hw;
2747 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
3049 struct qla_hw_data *ha = vha->hw;
3079 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
3088 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
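Lines 2747, 3079 and 3088 allocate the ELS request/response payloads with dma_alloc_coherent(): a buffer the CPU can fill and the HBA can DMA, plus the bus address to place in the IOCB. A sketch of the allocate/free pair; the buffer size, device pointer and GFP_KERNEL context are assumptions.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct els_buf {
    void       *vaddr;
    dma_addr_t  dma;
    size_t      size;
};

static int els_buf_alloc(struct device *dev, struct els_buf *buf, size_t size)
{
    buf->size  = size;
    buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma, GFP_KERNEL);
    return buf->vaddr ? 0 : -ENOMEM;
}

static void els_buf_free(struct device *dev, struct els_buf *buf)
{
    dma_free_coherent(dev, buf->size, buf->vaddr, buf->dma);
}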
3101 (void *)&ha->plogi_els_payld + offsetof(struct fc_els_flogi, fl_csp),
3102 sizeof(ha->plogi_els_payld) - offsetof(struct fc_els_flogi, fl_csp));
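Lines 3101-3102 copy only the tail of a stored PLOGI payload, starting at offsetof(struct fc_els_flogi, fl_csp), so whatever precedes the common service parameters is left to be filled in separately. A standalone model of that copy-from-a-field-offset idiom; the example struct is an illustrative stand-in for fc_els_flogi.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct els_payload_example {    /* illustrative stand-in for fc_els_flogi */
    uint8_t cmd[4];
    uint8_t csp[16];            /* common service parameters */
    uint8_t wwpn[8];
    uint8_t wwnn[8];
};

int main(void)
{
    struct els_payload_example tmpl = { .csp = { 0x20, 0x20 } };
    struct els_payload_example out  = { .cmd = { 0x04 } };    /* opcode already set */
    size_t off = offsetof(struct els_payload_example, csp);

    /* keep out.cmd, overwrite everything from csp to the end with the template */
    memcpy((uint8_t *)&out + off, (uint8_t *)&tmpl + off, sizeof(tmpl) - off);

    printf("cmd=%#x csp[0]=%#x\n", out.cmd[0], out.csp[0]);
    return 0;
}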
3230 struct qla_hw_data *ha = vha->hw;
3238 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3297 struct qla_hw_data *ha = vha->hw;
3332 vha, ha->req_q_map[0]);
3352 ha->req_q_map[0]);
3387 struct qla_hw_data *ha = vha->hw;
3393 reg = &ha->iobase->isp82;
3396 rsp = ha->rsp_q_map[0];
3401 dbval = 0x04 | (ha->portnum << 5);
3405 if (qla2x00_marker(vha, ha->base_qpair,
3415 spin_lock_irqsave(&ha->hardware_lock, flags);
3423 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3461 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3492 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3554 if (ha->flags.fcp_prio_enabled)
3613 if (ha->flags.fcp_prio_enabled)
3655 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3657 wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3659 while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
3660 wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3670 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3674 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3680 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3683 spin_unlock_irqrestore(&ha->hardware_lock, flags);
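The ISP82xx path above does not stop at a single doorbell write: lines 3655-3660 write the doorbell value, then re-read (and if necessary re-write) it until the read-back matches. A kernel-style sketch of that post-and-confirm loop with assumed register pointers follows.

#include <linux/io.h>

static void doorbell_post_and_confirm(void __iomem *db_wr, void __iomem *db_rd,
                                      u32 dbval)
{
    writel(dbval, db_wr);              /* post the doorbell value */
    while (readl(db_rd) != dbval)      /* has the adapter latched it? */
        writel(dbval, db_wr);          /* no: re-post and check again */
}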
3926 struct qla_hw_data *ha = vha->hw;
3951 IS_FWI2_CAPABLE(ha) ?
3959 IS_FWI2_CAPABLE(ha) ?
3972 IS_FWI2_CAPABLE(ha) ?
3977 IS_FWI2_CAPABLE(ha) ?
3982 IS_QLAFX00(ha) ?
3994 IS_QLAFX00(ha) ?
4141 struct qla_hw_data *ha = vha->hw;
4154 rsp = ha->rsp_q_map[0];
4159 if (qla2x00_marker(vha, ha->base_qpair,
4166 spin_lock_irqsave(&ha->hardware_lock, flags);
4179 if (IS_SHADOW_REG_CAPABLE(ha)) {
4224 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4250 struct qla_hw_data *ha = vha->hw;
4284 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
4326 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
4342 if (IS_SHADOW_REG_CAPABLE(ha)) {
4361 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
4430 if (ha->flags.fcp_prio_enabled)
4475 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
4484 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
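Lines 4361, 4475 and 4484 show the allocate/unwind pairing for the command context: the FCP command buffer comes from a DMA pool, the context itself from a mempool, and the error path releases them in reverse order. A sketch of that pattern with illustrative names:

#include <linux/dmapool.h>
#include <linux/gfp.h>
#include <linux/mempool.h>

struct cmd_ctx {                 /* illustrative context object */
    void       *fcp_cmnd;
    dma_addr_t  fcp_cmnd_dma;
};

static struct cmd_ctx *ctx_get(mempool_t *ctx_pool, struct dma_pool *cmnd_pool)
{
    struct cmd_ctx *ctx = mempool_alloc(ctx_pool, GFP_ATOMIC);

    if (!ctx)
        return NULL;

    ctx->fcp_cmnd = dma_pool_zalloc(cmnd_pool, GFP_ATOMIC, &ctx->fcp_cmnd_dma);
    if (!ctx->fcp_cmnd) {
        mempool_free(ctx, ctx_pool);    /* unwind in reverse order */
        return NULL;
    }
    return ctx;
}

static void ctx_put(struct cmd_ctx *ctx, mempool_t *ctx_pool,
                    struct dma_pool *cmnd_pool)
{
    dma_pool_free(cmnd_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
    mempool_free(ctx, ctx_pool);
}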