Lines Matching defs:eq

84 				     struct lpfc_queue *eq,
574 __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
580 eq->host_index = ((eq->host_index + 1) % eq->entry_count);
583 if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
584 eq->qe_valid = (eq->qe_valid) ? 0 : 1;
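The consume path at lines 574-584 advances eq->host_index modulo entry_count and, on ports with the EQ autovalid feature (eqav), toggles eq->qe_valid each time the index wraps back to zero. A minimal stand-alone sketch of that phase-bit ring convention (the demo_* names are illustrative, not lpfc structures):

#include <stdio.h>

/* Illustrative ring consumer: the hardware writes each entry with the
 * current "valid" phase; the host flips the phase it expects every time
 * host_index wraps, so entries left over from the previous lap are never
 * mistaken for new ones. */
struct demo_eq {
        unsigned int host_index;    /* next entry the host will read */
        unsigned int entry_count;   /* ring size */
        unsigned int qe_valid;      /* phase bit the host expects */
};

static void demo_consume_eqe(struct demo_eq *eq)
{
        eq->host_index = (eq->host_index + 1) % eq->entry_count;

        /* On wrap-around the producer starts writing the opposite phase,
         * so flip the expected valid bit. */
        if (!eq->host_index)
                eq->qe_valid = eq->qe_valid ? 0 : 1;
}

int main(void)
{
        struct demo_eq eq = { .host_index = 0, .entry_count = 4, .qe_valid = 1 };
        int i;

        for (i = 0; i < 9; i++) {
                demo_consume_eqe(&eq);
                printf("index=%u valid=%u\n", eq.host_index, eq.qe_valid);
        }
        return 0;
}

The flush path at lines 588-628 relies on the same convention: it keeps consuming until lpfc_sli4_eq_get() returns no more valid entries, then writes the doorbell with LPFC_QUEUE_REARM.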
588 lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
597 eqe = lpfc_sli4_eq_get(eq);
603 list_for_each_entry(childq, &eq->child_list, list) {
622 __lpfc_sli4_consume_eqe(phba, eq, eqe);
624 eqe = lpfc_sli4_eq_get(eq);
628 phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
632 lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
638 if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
641 eqe = lpfc_sli4_eq_get(eq);
643 lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode);
644 __lpfc_sli4_consume_eqe(phba, eq, eqe);
647 if (!(++count % eq->max_proc_limit))
650 if (!(count % eq->notify_interval)) {
651 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
656 eqe = lpfc_sli4_eq_get(eq);
658 eq->EQ_processed += count;
661 if (count > eq->EQ_max_eqe)
662 eq->EQ_max_eqe = count;
664 xchg(&eq->queue_claimed, 0);
668 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
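Lines 632-668 sketch the shape of lpfc_sli4_process_eq(): the EQ is claimed with cmpxchg() so only one context drains it, entries are consumed in a loop, the doorbell is written every notify_interval entries without rearming, work is capped at max_proc_limit, statistics (EQ_processed, EQ_max_eqe) are updated, and the claim is released with xchg() before a final doorbell write that may rearm. Below is a simplified model of that claim/drain/notify flow using C11 atomics; everything prefixed demo_ is invented for illustration and the hardware accessors are stubs:

#include <stdatomic.h>
#include <stdbool.h>

#define DEMO_NOTIFY_INTERVAL    16
#define DEMO_MAX_PROC_LIMIT     256

struct demo_eq {
        atomic_uint queue_claimed;      /* 0 = free, 1 = being drained */
        unsigned int processed_total;
};

/* Stand-ins for hardware access; in the driver these would fetch the next
 * valid EQE and write the EQ doorbell. */
static bool demo_get_eqe(struct demo_eq *eq) { (void)eq; return false; }
static void demo_write_eq_db(struct demo_eq *eq, unsigned int consumed,
                             bool rearm)
{ (void)eq; (void)consumed; (void)rearm; }

static unsigned int demo_process_eq(struct demo_eq *eq, bool rearm)
{
        unsigned int expected = 0;
        unsigned int count = 0, consumed = 0;

        /* Only one context may drain the queue at a time. */
        if (!atomic_compare_exchange_strong(&eq->queue_claimed, &expected, 1))
                return 0;

        while (demo_get_eqe(eq)) {
                /* ...handle and consume the entry here... */
                consumed++;

                /* Stop after a bounded amount of work per invocation. */
                if (!(++count % DEMO_MAX_PROC_LIMIT))
                        break;

                /* Periodically return credits without rearming. */
                if (!(count % DEMO_NOTIFY_INTERVAL)) {
                        demo_write_eq_db(eq, consumed, false);
                        consumed = 0;
                }
        }
        eq->processed_total += count;

        /* Release the claim, then write the final doorbell (maybe rearming). */
        atomic_exchange(&eq->queue_claimed, 0);
        demo_write_eq_db(eq, consumed, rearm);
        return count;
}

int main(void)
{
        struct demo_eq eq = { .processed_total = 0 };

        atomic_init(&eq.queue_claimed, 0);
        return (int)demo_process_eq(&eq, true);
}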
6147 struct lpfc_queue *eq;
6166 eq = sli4_hba->hba_eq_hdl[qidx].eq;
6168 sli4_hba->sli4_write_eq_db(phba, eq,
7968 * This routine initializes the per-eq idle_stat to dynamically dictate
7978 struct lpfc_queue *eq;
7984 eq = hdwq->hba_eq;
7986 /* Skip if we've already handled this eq's primary CPU */
7987 if (eq->chann != i)
7998 eq->poll_mode = LPFC_QUEUE_WORK;
8000 eq->poll_mode = LPFC_THREADED_IRQ;
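Lines 7968-8000 are from the routine that seeds per-EQ idle statistics (used to steer polling decisions) and assigns eq->poll_mode to either LPFC_QUEUE_WORK or LPFC_THREADED_IRQ. As a hedged illustration of the idle-tracking half only, the sketch below derives an idle percentage from two cumulative idle/total samples and picks a mode from it; the threshold, the demo_* names, and the idea that idleness alone decides the mode are assumptions for the example, not the driver's actual policy:

#include <stdint.h>
#include <stdio.h>

enum demo_poll_mode { DEMO_QUEUE_WORK, DEMO_THREADED_IRQ };

struct demo_idle_stat {
        uint64_t prev_idle;   /* cumulative idle time at last sample  */
        uint64_t prev_wall;   /* cumulative total time at last sample */
};

/* Pick a completion mode from the change in idle time since the last
 * sample: a mostly-idle CPU can afford work-item processing, a busy one
 * is better served by a threaded IRQ.  The 85% threshold is illustrative. */
static enum demo_poll_mode
demo_pick_poll_mode(struct demo_idle_stat *s, uint64_t idle, uint64_t wall)
{
        uint64_t diff_idle = idle - s->prev_idle;
        uint64_t diff_wall = wall - s->prev_wall;
        enum demo_poll_mode mode;

        if (diff_wall && diff_idle * 100 / diff_wall >= 85)
                mode = DEMO_QUEUE_WORK;     /* CPU ~idle: use a work item */
        else
                mode = DEMO_THREADED_IRQ;   /* CPU busy: use threaded IRQ */

        s->prev_idle = idle;
        s->prev_wall = wall;
        return mode;
}

int main(void)
{
        struct demo_idle_stat s = { 0, 0 };

        printf("%d\n", demo_pick_poll_mode(&s, 90, 100));   /* idle -> 0 */
        printf("%d\n", demo_pick_poll_mode(&s, 120, 200));  /* busy -> 1 */
        return 0;
}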
9186 * and will process all the completions associated with the eq for the
9195 struct lpfc_queue *eq;
9204 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
9205 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
9206 fpeq = eq;
11249 inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
11251 struct lpfc_hba *phba = eq->phba;
11262 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
11265 * Future io's coming on this eq should be able to
11270 lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM,
11292 struct lpfc_queue *eq;
11303 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
11313 lpfc_sli4_poll_eq(eq);
15392 * @eq: Pointer to the queue structure.
15404 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
15408 uint32_t qidx = eq->hdwq;
15555 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
15657 struct lpfc_queue *eq;
15661 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
15662 lpfc_sli4_poll_eq(eq);
15670 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
15672 struct lpfc_hba *phba = eq->phba;
15679 list_add_rcu(&eq->_poll_list, &phba->poll_list);
15683 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15685 struct lpfc_hba *phba = eq->phba;
15687 /* Disable slowpath processing for this eq. Kick start the eq
15688 * by RE-ARMING the eq's ASAP
15690 list_del_rcu(&eq->_poll_list);
15699 struct lpfc_queue *eq, *next;
15701 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15702 list_del(&eq->_poll_list);
15709 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15711 if (mode == eq->mode)
15724 WRITE_ONCE(eq->mode, mode);
15729 * Add this eq to the polling list and start polling. For
15731 * try to process the eq _but_ that's fine. We have a
15734 * errupt handler (not eq's) as we have guaranteed through
15737 * the EQ. The whole idea is eq's die off eventually as
15740 mode ? lpfc_sli4_add_to_poll_list(eq) :
15741 lpfc_sli4_remove_from_poll_list(eq);
15744 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15746 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15749 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15751 struct lpfc_hba *phba = eq->phba;
15753 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15756 * Once we switch back to interrupt processing on an eq
15757 * the io path completion will only arm eq's when it
15758 * receives a completion. But since eq's are in disa-
15762 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
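Lines 15657-15762 cover the polled-EQ machinery: EQs in poll mode sit on phba->poll_list, which readers walk with list_for_each_entry_rcu() while list_add_rcu()/list_del_rcu() handle membership changes; __lpfc_sli4_switch_eqmode() publishes the new mode with WRITE_ONCE(), and lpfc_sli4_stop_polling() rearms the EQ doorbell because a queue that was only ever polled may have been left unarmed. A kernel-context sketch of the same pattern (not a standalone program; the demo_* names, the writer-side spinlock, and the doorbell stub are illustrative assumptions):

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

enum demo_eq_mode { DEMO_EQ_INTERRUPT, DEMO_EQ_POLL };

struct demo_eq {
        struct list_head poll_node;     /* membership on the poll list */
        enum demo_eq_mode mode;
};

static LIST_HEAD(demo_poll_list);
static DEFINE_SPINLOCK(demo_poll_lock); /* serializes list writers only */

/* Stub standing in for the EQ doorbell write (consumed count, rearm). */
static void demo_write_eq_db(struct demo_eq *eq, u32 consumed, bool rearm) { }

static void demo_add_to_poll_list(struct demo_eq *eq)
{
        spin_lock(&demo_poll_lock);
        list_add_rcu(&eq->poll_node, &demo_poll_list);
        spin_unlock(&demo_poll_lock);
}

static void demo_remove_from_poll_list(struct demo_eq *eq)
{
        spin_lock(&demo_poll_lock);
        list_del_rcu(&eq->poll_node);
        spin_unlock(&demo_poll_lock);
        synchronize_rcu();      /* wait out readers still walking the list */
}

/* Reader side, e.g. a periodic timer: no lock, just an RCU read section. */
static void demo_poll_all(void)
{
        struct demo_eq *eq;

        rcu_read_lock();
        list_for_each_entry_rcu(eq, &demo_poll_list, poll_node)
                ;       /* process this eq without rearming it */
        rcu_read_unlock();
}

static void demo_switch_eqmode(struct demo_eq *eq, enum demo_eq_mode mode)
{
        if (mode == READ_ONCE(eq->mode))
                return;
        WRITE_ONCE(eq->mode, mode);     /* publish the new mode */

        if (mode == DEMO_EQ_POLL)
                demo_add_to_poll_list(eq);
        else
                demo_remove_from_poll_list(eq);
}

static void demo_stop_polling(struct demo_eq *eq)
{
        demo_switch_eqmode(eq, DEMO_EQ_INTERRUPT);
        /* While polled the EQ may never have been rearmed; rearm it now so
         * the hardware can raise interrupts again. */
        demo_write_eq_db(eq, 0, true);
}

The submit path at lines 11249-11313 uses the mode check the same way: after posting an I/O it calls lpfc_sli4_poll_eq(), which processes the EQ without rearming only when READ_ONCE(eq->mode) is LPFC_EQ_POLL.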
15944 struct lpfc_queue *eq;
15964 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15965 if (!eq)
15968 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
16000 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
16001 if (!eq)
16003 eq->q_mode = usdelay;
16004 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
16005 eq_delay->u.request.eq[cnt].phase = 0;
16006 eq_delay->u.request.eq[cnt].delay_multi = dmult;
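Lines 15944-16006 are from the EQ delay-coalescing update: where per-EQ delay registers are available the driver calls lpfc_sli4_mod_hba_eq_delay() directly, otherwise a MODIFY_EQ_DELAY mailbox is filled with one {eq_id, phase, delay_multi} entry per EQ, with the multiplier (dmult) derived from the requested microsecond delay and eq->q_mode recording that delay. The sketch below only illustrates batching such entries; the conversion constant, cap, and structure layout are placeholders, not the values or formats the firmware actually uses:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical encoding: the firmware takes a "delay multiplier" rather
 * than microseconds.  The constant and cap below are placeholders. */
#define DEMO_DMULT_PER_USEC     4
#define DEMO_DMULT_MAX          1023

struct demo_eq_delay_entry {
        uint16_t eq_id;
        uint8_t  phase;
        uint32_t delay_multi;
};

/* Build one MODIFY-style request entry per EQ for a batched command. */
static size_t demo_fill_eq_delay(struct demo_eq_delay_entry *ent,
                                 const uint16_t *eq_ids, size_t nr_eq,
                                 uint32_t usdelay)
{
        uint32_t dmult = usdelay * DEMO_DMULT_PER_USEC;
        size_t cnt;

        if (dmult > DEMO_DMULT_MAX)
                dmult = DEMO_DMULT_MAX;

        for (cnt = 0; cnt < nr_eq; cnt++) {
                ent[cnt].eq_id = eq_ids[cnt];
                ent[cnt].phase = 0;
                ent[cnt].delay_multi = dmult;
        }
        return cnt;
}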
16033 * @eq: The queue structure to use to create the event queue.
16036 * This function creates an event queue, as detailed in @eq, on a port,
16039 * The @phba struct is used to send mailbox command to HBA. The @eq struct
16051 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
16063 if (!eq)
16079 eq->page_count);
16096 switch (eq->entry_count) {
16100 eq->entry_count);
16101 if (eq->entry_count < 256) {
16127 list_for_each_entry(dmabuf, &eq->page_list, list) {
16148 eq->type = LPFC_EQ;
16149 eq->subtype = LPFC_NONE;
16150 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
16151 if (eq->queue_id == 0xFFFF)
16153 eq->host_index = 0;
16154 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
16155 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
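Per lines 16051-16155, lpfc_eq_create() validates eq->entry_count (counts below 256 are rejected), sends the create mailbox with the queue's pages, and on success records the returned queue_id (0xFFFF indicates failure) and resets host_index, notify_interval and max_proc_limit. Below is a hedged sketch of one way to pick a supported ring size; the size list mirrors common SLI-4 EQ sizes but should be treated as illustrative, and the real driver's handling of unsupported counts differs in detail:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* Sizes the create command can encode; illustrative of the EQ sizes
 * handled by the switch on eq->entry_count. */
static const uint32_t demo_eq_sizes[] = { 256, 512, 1024, 2048, 4096 };

/* Round a requested count up to the next supported size; reject anything
 * below the smallest supported ring. */
static int demo_pick_eq_entry_count(uint32_t requested, uint32_t *chosen)
{
        size_t i;

        if (requested < demo_eq_sizes[0])
                return -EINVAL;

        for (i = 0; i < sizeof(demo_eq_sizes) / sizeof(demo_eq_sizes[0]); i++) {
                if (requested <= demo_eq_sizes[i]) {
                        *chosen = demo_eq_sizes[i];
                        return 0;
                }
        }
        /* Larger than the biggest supported ring: clamp. */
        *chosen = demo_eq_sizes[i - 1];
        return 0;
}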
16193 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
16230 * @eq: The event queue to bind this completion queue to.
16239 * determine the number of pages to allocate and use for this queue. The @eq
16251 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
16261 if (!cq || !eq)
16284 eq->queue_id);
16289 eq->queue_id);
16353 /* link the cq onto the parent eq child list */
16354 list_add_tail(&cq->list, &eq->child_list);
16359 cq->assoc_qid = eq->queue_id;
16360 cq->assoc_qp = eq;
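Lines 16230-16360 show lpfc_cq_create() binding a completion queue to its parent event queue: the request carries eq->queue_id, and on success the CQ is linked onto eq->child_list and records assoc_qid/assoc_qp so flush and teardown can walk the relationship (lpfc_sli4_eqcq_flush at line 603 iterates exactly this child_list). A small kernel-style sketch of that bookkeeping, with illustrative demo_ names and the assumption that the EQ's child_list was initialized when the EQ was created:

#include <linux/list.h>
#include <linux/types.h>

struct demo_queue {
        struct list_head list;          /* node on the parent's child_list */
        struct list_head child_list;    /* CQs hanging off this queue */
        struct demo_queue *assoc_qp;    /* parent queue, if any */
        u32 queue_id;
        u32 assoc_qid;
};

/* After the create command succeeds, remember which EQ services this CQ
 * so flush and teardown can walk the parent->child relationship. */
static void demo_bind_cq_to_eq(struct demo_queue *cq, struct demo_queue *eq)
{
        list_add_tail(&cq->list, &eq->child_list);
        cq->assoc_qid = eq->queue_id;
        cq->assoc_qp  = eq;
}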
16386 * determine the number of pages to allocate and use for this queue. The @eq
16402 struct lpfc_queue *eq;
16441 eq = hdwq[idx].hba_eq;
16442 if (!cq || !eq) {
16504 &cq_set->u.request, eq->queue_id);
16508 &cq_set->u.request, eq->queue_id);
16512 &cq_set->u.request, eq->queue_id);
16516 &cq_set->u.request, eq->queue_id);
16520 &cq_set->u.request, eq->queue_id);
16524 &cq_set->u.request, eq->queue_id);
16528 &cq_set->u.request, eq->queue_id);
16532 &cq_set->u.request, eq->queue_id);
16536 &cq_set->u.request, eq->queue_id);
16540 &cq_set->u.request, eq->queue_id);
16544 &cq_set->u.request, eq->queue_id);
16548 &cq_set->u.request, eq->queue_id);
16552 &cq_set->u.request, eq->queue_id);
16556 &cq_set->u.request, eq->queue_id);
16560 &cq_set->u.request, eq->queue_id);
16564 &cq_set->u.request, eq->queue_id);
16568 /* link the cq onto the parent eq child list */
16569 list_add_tail(&cq->list, &eq->child_list);
16573 cq->assoc_qid = eq->queue_id;
16574 cq->assoc_qp = eq;
17587 * @eq: The queue structure associated with the queue to destroy.
17589 * This function destroys a queue, as detailed in @eq, by sending a mailbox
17592 * The @eq struct is used to get the queue ID of the queue to destroy.
17598 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
17606 if (!eq)
17609 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
17618 eq->queue_id);
17619 mbox->vport = eq->phba->pport;
17622 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
17636 /* Remove eq from any list */
17637 list_del_init(&eq->list);
17638 mempool_free(mbox, eq->phba->mbox_mem_pool);
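Finally, lines 17598-17638 outline lpfc_eq_destroy(): a mailbox is taken from the mempool, the destroy command is built with the EQ's queue_id and issued with MBX_POLL, then the queue is unlinked with list_del_init() and the mailbox returned to the pool. A condensed kernel-style sketch of that teardown order; the command-building and issue helpers are stubs, not the driver's real API:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/types.h>

struct demo_queue {
        struct list_head list;
        u32 queue_id;
};

/* Stubs standing in for building and issuing the destroy mailbox. */
static void demo_build_destroy_cmd(void *mbox, u32 queue_id) { }
static int demo_issue_mbox_poll(void *mbox) { return 0; }

static int demo_eq_destroy(mempool_t *mbox_pool, struct demo_queue *eq)
{
        void *mbox;
        int rc;

        if (!eq)
                return -ENODEV;

        mbox = mempool_alloc(mbox_pool, GFP_KERNEL);
        if (!mbox)
                return -ENOMEM;

        demo_build_destroy_cmd(mbox, eq->queue_id);
        rc = demo_issue_mbox_poll(mbox);

        /* Take the queue off whatever list it is on and return the mailbox
         * to the pool, regardless of the command's outcome. */
        list_del_init(&eq->list);
        mempool_free(mbox, mbox_pool);
        return rc;
}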