Lines Matching refs:ha

15 qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
20 if ((req_cnt + 2) >= ha->req_q_count) {
21 cnt = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha);
22 if (ha->request_in < cnt)
23 ha->req_q_count = cnt - ha->request_in;
25 ha->req_q_count = REQUEST_QUEUE_DEPTH -
26 (ha->request_in - cnt);
30 if ((req_cnt + 2) < ha->req_q_count)
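
The free-entry check above compares the producer index (request_in) against the shadow consumer index read back from the controller, and keeps two entries in reserve. A minimal user-space sketch of the same arithmetic, assuming a ring depth of 64 (the macro here is only a stand-in for the driver's REQUEST_QUEUE_DEPTH):

    /* Illustrative sketch, not the driver function: does req_cnt entries
     * fit in a ring of REQUEST_QUEUE_DEPTH slots, given producer index
     * 'in' and the hardware's shadow consumer index 'out'? */
    #include <stdint.h>
    #include <stdio.h>

    #define REQUEST_QUEUE_DEPTH 64          /* assumed depth for the example */

    static int space_in_ring(uint16_t in, uint16_t out, uint16_t req_cnt)
    {
            uint16_t free_cnt;

            if (in < out)
                    free_cnt = out - in;
            else
                    free_cnt = REQUEST_QUEUE_DEPTH - (in - out);

            /* keep two entries in reserve, as the driver does */
            return req_cnt + 2 < free_cnt;
    }

    int main(void)
    {
            printf("%d\n", space_in_ring(10, 12, 1));   /* nearly full ring -> 0 */
            printf("%d\n", space_in_ring(5, 40, 8));    /* plenty of room  -> 1 */
            return 0;
    }

The slack keeps the producer from catching up with the consumer, where a completely full ring would otherwise be indistinguishable from an empty one.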
36 static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha)
39 if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
40 ha->request_in = 0;
41 ha->request_ptr = ha->request_ring;
43 ha->request_in++;
44 ha->request_ptr++;
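
qla4xxx_advance_req_ring_ptr() moves request_in and request_ptr in lockstep, wrapping both back to the start of the ring at the last slot. A standalone sketch of the same step (ring, entry size and depth are placeholders, not the driver's types):

    #include <stdint.h>

    #define RING_DEPTH 64                       /* stand-in for REQUEST_QUEUE_DEPTH */

    struct req_entry { uint8_t raw[64]; };      /* placeholder IOCB-sized slot */

    static struct req_entry ring[RING_DEPTH];

    void advance_ring(uint16_t *in, struct req_entry **ptr)
    {
            if (*in == RING_DEPTH - 1) {
                    *in = 0;
                    *ptr = ring;                /* wrap back to the first entry */
            } else {
                    (*in)++;
                    (*ptr)++;
            }
    }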
50 * @ha: Pointer to host adapter structure.
58 static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
63 if (qla4xxx_space_in_req_ring(ha, req_cnt)) {
64 *queue_entry = ha->request_ptr;
67 qla4xxx_advance_req_ring_ptr(ha);
68 ha->req_q_count -= req_cnt;
77 * @ha: Pointer to host adapter structure.
84 int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
92 spin_lock_irqsave(&ha->hardware_lock, flags);
95 if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
110 ha->isp_ops->queue_iocb(ha);
113 spin_unlock_irqrestore(&ha->hardware_lock, flags);
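
qla4xxx_send_marker_iocb() shows the submission pattern used throughout the file: take hardware_lock with interrupts saved, reserve a request entry, fill it in, and only then ring the doorbell through isp_ops->queue_iocb(). A user-space analogue of that ordering, with a pthread mutex standing in for the spinlock and a stub for the doorbell (all names below are illustrative):

    #include <pthread.h>
    #include <stdint.h>
    #include <string.h>

    #define DEPTH 64

    struct slot { uint32_t opcode; uint32_t payload; };

    static struct slot ring[DEPTH];
    static uint16_t request_in;
    static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

    static void publish(uint16_t in) { (void)in; }   /* stands in for queue_iocb() */

    int send_marker(uint32_t opcode, uint32_t payload)
    {
            pthread_mutex_lock(&hw_lock);
            struct slot *s = &ring[request_in];
            memset(s, 0, sizeof(*s));               /* build the entry in place */
            s->opcode = opcode;
            s->payload = payload;
            request_in = (request_in + 1) % DEPTH;
            publish(request_in);                    /* doorbell only after the entry is complete */
            pthread_mutex_unlock(&hw_lock);
            return 0;
    }

Publishing the index under the same lock that built the entry is what keeps the consumer from ever seeing a half-written IOCB.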
118 qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha)
122 cont_entry = (struct continuation_t1_entry *)ha->request_ptr;
124 qla4xxx_advance_req_ring_ptr(ha);
129 cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);
151 struct scsi_qla_host *ha;
159 ha = srb->ha;
177 cont_entry = qla4xxx_alloc_cont_entry(ha);
194 void qla4_83xx_queue_iocb(struct scsi_qla_host *ha)
196 writel(ha->request_in, &ha->qla4_83xx_reg->req_q_in);
197 readl(&ha->qla4_83xx_reg->req_q_in);
200 void qla4_83xx_complete_iocb(struct scsi_qla_host *ha)
202 writel(ha->response_out, &ha->qla4_83xx_reg->rsp_q_out);
203 readl(&ha->qla4_83xx_reg->rsp_q_out);
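
Both 83xx helpers write the new index and then immediately read the same register back. The read-back is the usual way to flush a posted MMIO write, so the doorbell reaches the controller before the CPU moves on. A kernel-style sketch of just that pair (the helper name and register argument are hypothetical):

    #include <linux/io.h>

    /* Flush a posted doorbell write by reading the register back. */
    static inline void ring_doorbell(void __iomem *reg, u32 val)
    {
            writel(val, reg);       /* post the new producer/consumer index */
            readl(reg);             /* read-back flushes the posted write */
    }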
208 * @ha: pointer to host adapter structure.
213 void qla4_82xx_queue_iocb(struct scsi_qla_host *ha)
217 dbval = 0x14 | (ha->func_num << 5);
218 dbval = dbval | (0 << 8) | (ha->request_in << 16);
220 qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
225 * @ha: pointer to host adapter structure.
231 void qla4_82xx_complete_iocb(struct scsi_qla_host *ha)
233 writel(ha->response_out, &ha->qla4_82xx_reg->rsp_q_out);
234 readl(&ha->qla4_82xx_reg->rsp_q_out);
239 * @ha: pointer to host adapter structure.
244 void qla4xxx_queue_iocb(struct scsi_qla_host *ha)
246 writel(ha->request_in, &ha->reg->req_q_in);
247 readl(&ha->reg->req_q_in);
252 * @ha: pointer to host adapter structure.
258 void qla4xxx_complete_iocb(struct scsi_qla_host *ha)
260 writel(ha->response_out, &ha->reg->rsp_q_out);
261 readl(&ha->reg->rsp_q_out);
266 * @ha: pointer to host adapter structure.
272 int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
289 spin_lock_irqsave(&ha->hardware_lock, flags);
299 if (!test_bit(AF_ONLINE, &ha->flags)) {
302 ha->host_no, __func__));
313 if (!qla4xxx_space_in_req_ring(ha, req_cnt))
317 if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat)
321 cmd_entry = (struct command_t3_entry *) ha->request_ptr;
344 ha->bytes_xfered += scsi_bufflen(cmd);
345 if (ha->bytes_xfered & ~0xFFFFF) {
346 ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
347 ha->bytes_xfered &= 0xFFFFF;
354 qla4xxx_advance_req_ring_ptr(ha);
365 ha->iocb_cnt += req_cnt;
367 ha->req_q_count -= req_cnt;
369 ha->isp_ops->queue_iocb(ha);
370 spin_unlock_irqrestore(&ha->hardware_lock, flags);
378 spin_unlock_irqrestore(&ha->hardware_lock, flags);
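
The bytes_xfered bookkeeping inside qla4xxx_send_command_to_isp() rolls completed megabytes into total_mbytes_xferred and keeps only the sub-megabyte remainder. The same arithmetic in standalone form (counter types and the example transfer length are assumptions, not the driver's definitions):

    #include <stdint.h>
    #include <stdio.h>

    /* Roll whole megabytes out of 'bytes' into 'mbytes', keeping the
     * remainder below 1 MB, as the driver's counters do. */
    static void account(uint64_t *mbytes, uint32_t *bytes, uint32_t xfer_len)
    {
            *bytes += xfer_len;
            if (*bytes & ~0xFFFFF) {            /* anything at or above 1 MB? */
                    *mbytes += *bytes >> 20;    /* credit the whole megabytes */
                    *bytes &= 0xFFFFF;          /* keep the remainder */
            }
    }

    int main(void)
    {
            uint64_t mb = 0;
            uint32_t b = 0;

            account(&mb, &b, 3 * 1024 * 1024 + 512);    /* 3 MB + 512 B */
            printf("mbytes=%llu remainder=%u\n", (unsigned long long)mb, b);
            return 0;
    }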
388 struct scsi_qla_host *ha = ddb_entry->ha;
394 spin_lock_irqsave(&ha->hardware_lock, flags);
397 if (!qla4xxx_space_in_req_ring(ha, task_data->iocb_req_cnt))
400 passthru_iocb = (struct passthru0 *) ha->request_ptr;
436 qla4xxx_advance_req_ring_ptr(ha);
440 ha->iocb_cnt += task_data->iocb_req_cnt;
441 ha->req_q_count -= task_data->iocb_req_cnt;
442 ha->isp_ops->queue_iocb(ha);
446 spin_unlock_irqrestore(&ha->hardware_lock, flags);
450 static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha)
458 mrb->ha = ha;
462 static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
471 spin_lock_irqsave(&ha->hardware_lock, flags);
474 rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **) &(mrb->mbox));
478 index = ha->mrb_index;
484 if (ha->active_mrb_array[index] == NULL) {
485 ha->mrb_index = index;
491 ha->active_mrb_array[index] = mrb;
499 ha->iocb_cnt += mrb->iocb_cnt;
500 ha->isp_ops->queue_iocb(ha);
502 spin_unlock_irqrestore(&ha->hardware_lock, flags);
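
qla4xxx_send_mbox_iocb() hands out mailbox-request handles by scanning active_mrb_array circularly from the last index used and claiming the first NULL slot. A simplified sketch of that search, with MAX_MRB, the element type and the wrap-around point chosen only for illustration:

    #include <stddef.h>
    #include <stdint.h>

    #define MAX_MRB 128                         /* assumed array size */

    static void *active_mrb_array[MAX_MRB];
    static uint32_t mrb_index;

    /* Claim the first free slot after the last one handed out; returns the
     * slot index, or -1 if every handle is in use. */
    int claim_mrb_slot(void *mrb)
    {
            uint32_t i, index = mrb_index;

            for (i = 0; i < MAX_MRB; i++) {
                    index++;
                    if (index == MAX_MRB)
                            index = 0;          /* wrap-around point is an assumption */
                    if (active_mrb_array[index] == NULL) {
                            mrb_index = index;
                            active_mrb_array[index] = mrb;
                            return (int)index;
                    }
            }
            return -1;
    }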
506 int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
515 mrb = qla4xxx_get_new_mrb(ha);
517 DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: fail to get new mrb\n",
532 rval = qla4xxx_send_mbox_iocb(ha, mrb, in_mbox);