Lines Matching defs:ha

339 struct qla_hw_data *ha = vha->hw;
347 sa_id_map = ha->edif_tx_sa_id_map;
350 sa_id_map = ha->edif_rx_sa_id_map;
353 spin_lock_irqsave(&ha->sadb_fp_lock, flags);
355 spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
448 struct qla_hw_data *ha = vha->hw;
454 spin_lock_irqsave(&ha->sadb_lock, flags);
456 list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
459 spin_unlock_irqrestore(&ha->sadb_lock, flags);
462 spin_lock_irqsave(&ha->sadb_lock, flags);
467 list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
470 spin_unlock_irqrestore(&ha->sadb_lock, flags);
475 spin_lock_irqsave(&ha->sadb_lock, flags);
479 spin_unlock_irqrestore(&ha->sadb_lock, flags);
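The matches at 448-479 walk ha->sadb_rx_index_list and ha->sadb_tx_index_list with list_for_each_entry_safe() while holding sadb_lock. A minimal sketch of that locked-teardown pattern, assuming cut-down stand-ins for the driver structures (the entry layout and the helper name are not the driver's real definitions):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Cut-down stand-ins; layouts are assumed, only the field names follow the matches. */
struct sa_index_entry {
	struct list_head next;	/* member name matches the list_for_each_entry_safe() calls above */
	/* ... per-handle SA bookkeeping ... */
};

struct hw_sadb {
	spinlock_t sadb_lock;
	struct list_head sadb_rx_index_list;
	struct list_head sadb_tx_index_list;
};

/* Hypothetical helper: empty one sadb index list under sadb_lock. */
static void sadb_list_flush(struct hw_sadb *ha, struct list_head *sa_list)
{
	struct sa_index_entry *entry, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ha->sadb_lock, flags);
	/* _safe variant because entries are unlinked and freed mid-walk */
	list_for_each_entry_safe(entry, tmp, sa_list, next) {
		list_del(&entry->next);
		kfree(entry);	/* kfree() is usable in atomic context */
	}
	spin_unlock_irqrestore(&ha->sadb_lock, flags);
}

Note that the matches handle the rx list (454-459) and the tx list (462-470) in separate critical sections rather than one long one, so the helper above would be called once per list.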
1858 struct qla_hw_data *ha = vha->hw;
1873 ha->pdev->device);
2310 struct qla_hw_data *ha = vha->hw;
2312 if (!vha->vp_idx && N2N_TOPO(ha) && ha->flags.n2n_fw_acc_sec) {
2314 ha->edif_post_stop_cnt_down) {
2315 ha->edif_post_stop_cnt_down--;
2322 if (ha->edif_post_stop_cnt_down == 0) {
2329 ha->edif_post_stop_cnt_down = 60;
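The matches at 2310-2329 are a once-per-tick countdown: when the link is N2N and the firmware accelerates EDIF security (ha->flags.n2n_fw_acc_sec), edif_post_stop_cnt_down is decremented until it reaches zero and is then reset to 60. A minimal sketch of that shape only; the action taken at zero is omitted and the surrounding checks are assumptions:

#include <linux/types.h>

struct hw_edif_timer {		/* cut-down stand-in, assumed */
	u32 edif_post_stop_cnt_down;
};

/* Hypothetical timer-tick fragment; the real driver also gates this on
 * !vha->vp_idx, N2N topology and ha->flags.n2n_fw_acc_sec (line 2312). */
static void edif_post_stop_tick(struct hw_edif_timer *ha)
{
	if (!ha->edif_post_stop_cnt_down)
		return;

	if (--ha->edif_post_stop_cnt_down == 0) {
		/* ... post-stop handling would run here ... */
		ha->edif_post_stop_cnt_down = 60;	/* re-arm, per line 2329 */
	}
}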
2674 struct qla_hw_data *ha = vha->hw;
2683 sa_id_map = ha->edif_tx_sa_id_map;
2685 sa_id_map = ha->edif_rx_sa_id_map;
2687 spin_lock_irqsave(&ha->sadb_fp_lock, flags);
2690 spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
2694 spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
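The matches at 339-355 and 2674-2694 both select the tx or rx bitmap (edif_tx_sa_id_map / edif_rx_sa_id_map) and then take sadb_fp_lock, which is the shape of a bitmap-backed id pool; the two unlock sites at 2690/2694 are consistent with an early-exit path. A minimal sketch of such a pool, assuming find_first_zero_bit()/set_bit() for claiming and clear_bit() for releasing (the bit operations themselves are not shown in the matches) and an assumed pool size:

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define SA_INDEX_POOL_SIZE 512		/* assumed; the driver's pool size is not shown */

struct hw_sa_id_pool {			/* cut-down stand-in, assumed */
	spinlock_t sadb_fp_lock;
	unsigned long *edif_tx_sa_id_map;
	unsigned long *edif_rx_sa_id_map;
};

/* Hypothetical: claim a free SA index from the tx or rx map, -1 if exhausted. */
static int sa_id_claim(struct hw_sa_id_pool *ha, bool tx)
{
	unsigned long flags, bit;
	unsigned long *sa_id_map = tx ? ha->edif_tx_sa_id_map :
					ha->edif_rx_sa_id_map;

	spin_lock_irqsave(&ha->sadb_fp_lock, flags);
	bit = find_first_zero_bit(sa_id_map, SA_INDEX_POOL_SIZE);
	if (bit >= SA_INDEX_POOL_SIZE) {
		/* early exit on exhaustion */
		spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
		return -1;
	}
	set_bit(bit, sa_id_map);	/* claim before dropping the lock */
	spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
	return bit;
}

/* Hypothetical: return a previously claimed index to the pool. */
static void sa_id_release(struct hw_sa_id_pool *ha, bool tx, u16 sa_index)
{
	unsigned long flags;
	unsigned long *sa_id_map = tx ? ha->edif_tx_sa_id_map :
					ha->edif_rx_sa_id_map;

	spin_lock_irqsave(&ha->sadb_fp_lock, flags);
	clear_bit(sa_index, sa_id_map);
	spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
}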
2731 struct qla_hw_data *ha = vha->hw;
2738 sa_list = &ha->sadb_tx_index_list;
2740 sa_list = &ha->sadb_rx_index_list;
2750 spin_lock_irqsave(&ha->sadb_lock, flags);
2770 spin_unlock_irqrestore(&ha->sadb_lock, flags);
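The matches at 2731-2770 pick one of the two index lists by direction and then work on it under sadb_lock. A minimal lookup sketch in that style, using the same stand-ins as the flush sketch above with an assumed handle field added as the lookup key:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct sa_index_entry {		/* assumed layout */
	struct list_head next;
	u16 handle;		/* assumed lookup key */
};

struct hw_sadb {		/* cut-down stand-in, assumed */
	spinlock_t sadb_lock;
	struct list_head sadb_rx_index_list;
	struct list_head sadb_tx_index_list;
};

/* Hypothetical: find the entry for a handle in the tx or rx index list. */
static struct sa_index_entry *sadb_entry_find(struct hw_sadb *ha, bool tx,
					      u16 handle)
{
	struct list_head *sa_list = tx ? &ha->sadb_tx_index_list :
					 &ha->sadb_rx_index_list;
	struct sa_index_entry *entry, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ha->sadb_lock, flags);
	list_for_each_entry(entry, sa_list, next) {
		if (entry->handle == handle) {
			found = entry;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->sadb_lock, flags);

	return found;	/* only safe while the caller also serializes removal */
}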
2983 struct qla_hw_data *ha = vha->hw;
3025 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3043 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3169 if (ha->flags.fcp_prio_enabled)
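Line 3025 maps the SCSI command's scatterlist for DMA inside the EDIF start-scsi path, and 3043 reads the request-queue out pointer either from the shadow register or from the register window. A minimal sketch of the mapping step only, with the device passed in explicitly (the driver uses &ha->pdev->dev) and the queuing-error unwind left to the caller:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical fragment: map the command's SG list before building IOCBs. */
static int edif_map_sg_sketch(struct device *dev, struct scsi_cmnd *cmd)
{
	int nseg = 0;

	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
				  cmd->sc_data_direction);
		if (unlikely(!nseg))
			return -ENOMEM;	/* caller takes the queuing-error path */
	}

	return nseg;
}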
3259 struct qla_hw_data *ha = vha->hw;
3268 sa_list = &ha->sadb_tx_index_list;
3270 sa_list = &ha->sadb_rx_index_list;
3298 spin_lock_irqsave(&ha->sadb_lock, flags);
3300 spin_unlock_irqrestore(&ha->sadb_lock, flags);
3308 spin_lock_irqsave(&ha->sadb_lock, flags);
3316 spin_unlock_irqrestore(&ha->sadb_lock, flags);
3325 spin_unlock_irqrestore(&ha->sadb_lock, flags);
3348 spin_lock_irqsave(&ha->sadb_lock, flags);
3351 spin_unlock_irqrestore(&ha->sadb_lock, flags);
3360 void qla_edif_sadb_release(struct qla_hw_data *ha)
3364 list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
3369 list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
3380 int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha)
3382 ha->edif_tx_sa_id_map =
3385 if (!ha->edif_tx_sa_id_map) {
3386 ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
3391 ha->edif_rx_sa_id_map =
3393 if (!ha->edif_rx_sa_id_map) {
3394 kfree(ha->edif_tx_sa_id_map);
3395 ha->edif_tx_sa_id_map = NULL;
3396 ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
3404 void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha)
3406 kfree(ha->edif_tx_sa_id_map);
3407 ha->edif_tx_sa_id_map = NULL;
3408 kfree(ha->edif_rx_sa_id_map);
3409 ha->edif_rx_sa_id_map = NULL;
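Lines 3380-3409 are the allocate/release pair for the two SA-id bitmaps: qla_edif_sadb_build_free_pool() allocates the tx map, then the rx map, and on a partial failure frees the tx map before reporting the error; qla_edif_sadb_release_free_pool() frees both and NULLs the pointers. A minimal sketch of that pair, with an assumed pool size and without the driver's ql_log_pci() reporting:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/slab.h>

#define SA_INDEX_POOL_SIZE 512		/* assumed; not shown in the matches */

struct hw_sa_maps {			/* cut-down stand-in, assumed */
	unsigned long *edif_tx_sa_id_map;
	unsigned long *edif_rx_sa_id_map;
};

/* Allocate both bitmaps; roll back the first if the second fails. */
static int sa_maps_build(struct hw_sa_maps *ha)
{
	ha->edif_tx_sa_id_map = kcalloc(BITS_TO_LONGS(SA_INDEX_POOL_SIZE),
					sizeof(unsigned long), GFP_KERNEL);
	if (!ha->edif_tx_sa_id_map)
		return -ENOMEM;

	ha->edif_rx_sa_id_map = kcalloc(BITS_TO_LONGS(SA_INDEX_POOL_SIZE),
					sizeof(unsigned long), GFP_KERNEL);
	if (!ha->edif_rx_sa_id_map) {
		kfree(ha->edif_tx_sa_id_map);	/* undo the tx allocation, as at 3394-3395 */
		ha->edif_tx_sa_id_map = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Free both bitmaps and clear the pointers so a later release is harmless. */
static void sa_maps_release(struct hw_sa_maps *ha)
{
	kfree(ha->edif_tx_sa_id_map);
	ha->edif_tx_sa_id_map = NULL;
	kfree(ha->edif_rx_sa_id_map);
	ha->edif_rx_sa_id_map = NULL;
}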
3543 struct qla_hw_data *ha = vha->hw;
3587 if (!IS_FWI2_CAPABLE(ha)) {
3605 sp->remap.req.buf = dma_pool_alloc(ha->purex_dma_pool,
3617 sp->remap.rsp.buf = dma_pool_alloc(ha->purex_dma_pool,
3667 dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
3670 dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
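The matches at 3605-3670 allocate the ELS request and response staging buffers from ha->purex_dma_pool and free them again on the unwind path. A minimal sketch of that dma_pool pairing; the buffer struct, field names and error handling are assumptions beyond what the matches show:

#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct els_remap_buf {			/* assumed shape of sp->remap.req / .rsp */
	void *buf;
	dma_addr_t dma;
	u32 len;
};

/* Hypothetical: allocate request/response buffers from a shared dma_pool. */
static int els_remap_alloc(struct dma_pool *purex_dma_pool,
			   struct els_remap_buf *req, struct els_remap_buf *rsp)
{
	req->buf = dma_pool_alloc(purex_dma_pool, GFP_KERNEL, &req->dma);
	if (!req->buf)
		return -ENOMEM;

	rsp->buf = dma_pool_alloc(purex_dma_pool, GFP_KERNEL, &rsp->dma);
	if (!rsp->buf) {
		/* rsp allocation failed: give the req buffer back */
		dma_pool_free(purex_dma_pool, req->buf, req->dma);
		req->buf = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Release both buffers, rsp then req, matching the order at 3667-3670. */
static void els_remap_free(struct dma_pool *purex_dma_pool,
			   struct els_remap_buf *req, struct els_remap_buf *rsp)
{
	dma_pool_free(purex_dma_pool, rsp->buf, rsp->dma);
	dma_pool_free(purex_dma_pool, req->buf, req->dma);
}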