Lines Matching refs:cq

64 struct ice_ctl_q_info *cq = &hw->adminq;
68 ICE_CQ_INIT_REGS(cq, PF_FW);
79 struct ice_ctl_q_info *cq = &hw->mailboxq;
81 ICE_CQ_INIT_REGS(cq, PF_MBX);
87 * @cq: pointer to the specific Control queue
91 bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
94 if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
95 return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
96 cq->sq.len_ena_mask)) ==
97 (cq->num_sq_entries | cq->sq.len_ena_mask);
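
The liveness check above treats the send queue as alive only when the SQ length register still holds the programmed entry count and the queue-enable bit is set. A minimal standalone sketch of that comparison; sq_alive() and the mask values are illustrative placeholders, not the hardware's actual register layout:

#include <stdbool.h>
#include <stdint.h>

/* Placeholder field layout; the real masks live in the hardware headers. */
#define SQ_LEN_MASK     0x3FFu        /* length field of the SQ length register */
#define SQ_LEN_ENA_MASK 0x80000000u   /* queue-enable bit */

static bool sq_alive(uint32_t sq_len_reg, uint16_t num_sq_entries)
{
	/* Keep only the length and enable fields, then compare against what
	 * the driver programmed: the entry count with the enable bit set.
	 */
	return (sq_len_reg & (SQ_LEN_MASK | SQ_LEN_ENA_MASK)) ==
	       ((uint32_t)num_sq_entries | SQ_LEN_ENA_MASK);
}
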
105 * @cq: pointer to the specific Control queue
108 ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
110 size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
112 cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
113 if (!cq->sq.desc_buf.va)
122 * @cq: pointer to the specific Control queue
125 ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
127 size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
129 cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
130 if (!cq->rq.desc_buf.va)
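
Both ring allocators above size the descriptor ring as a single contiguous block of num_*_entries fixed-size descriptors. A small sketch of that sizing, assuming a stand-in 32-byte descriptor and plain heap memory where the driver uses ice_alloc_dma_mem() for DMA-coherent memory:

#include <stdlib.h>

/* Stand-in for struct ice_aq_desc: a fixed-size hardware descriptor. */
struct ctlq_desc { unsigned char raw[32]; };

static struct ctlq_desc *alloc_desc_ring(size_t num_entries)
{
	/* One contiguous allocation holding every descriptor in the ring;
	 * the driver allocates this as DMA-coherent memory instead.
	 */
	return calloc(num_entries, sizeof(struct ctlq_desc));
}
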
151 * @cq: pointer to the specific Control queue
154 ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
161 cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
162 sizeof(cq->rq.desc_buf));
163 if (!cq->rq.dma_head)
165 cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
168 for (i = 0; i < cq->num_rq_entries; i++) {
172 bi = &cq->rq.r.rq_bi[i];
173 bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
178 desc = ICE_CTL_Q_DESC(cq->rq, i);
181 if (cq->rq_buf_size > ICE_AQ_LG_BUF)
204 ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
205 cq->rq.r.rq_bi = NULL;
206 ice_free(hw, cq->rq.dma_head);
207 cq->rq.dma_head = NULL;
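
The receive-buffer setup is an allocate-then-unwind pattern: allocate a tracking array, allocate one buffer per ring entry and point its descriptor at it, and on any failure free only the entries already allocated (the unwind path at 204-207). ice_alloc_sq_bufs() below follows the same shape for the send side. A standalone sketch of the pattern, with malloc/free standing in for the driver's DMA helpers:

#include <stdlib.h>

struct dma_buf { void *va; size_t size; };

static int alloc_queue_bufs(struct dma_buf **out, int n, size_t buf_size)
{
	struct dma_buf *bufs = calloc(n, sizeof(*bufs));
	int i;

	if (!bufs)
		return -1;

	for (i = 0; i < n; i++) {
		bufs[i].va = malloc(buf_size);
		if (!bufs[i].va)
			goto unwind;
		bufs[i].size = buf_size;
		/* the driver also writes the buffer address and length
		 * into the matching receive descriptor here */
	}

	*out = bufs;
	return 0;

unwind:
	while (i--)	/* free only the entries that were allocated */
		free(bufs[i].va);
	free(bufs);
	return -1;
}
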
215 * @cq: pointer to the specific Control queue
218 ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
223 cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
224 sizeof(cq->sq.desc_buf));
225 if (!cq->sq.dma_head)
227 cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
230 for (i = 0; i < cq->num_sq_entries; i++) {
233 bi = &cq->sq.r.sq_bi[i];
234 bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
244 ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
245 cq->sq.r.sq_bi = NULL;
246 ice_free(hw, cq->sq.dma_head);
247 cq->sq.dma_head = NULL;
274 * @cq: pointer to the specific Control queue
279 ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
281 return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
287 * @cq: pointer to the specific Control queue
292 ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
296 status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
301 wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
324 * @cq: pointer to the specific Control queue
328 * in the cq structure:
329 * - cq->num_sq_entries
330 * - cq->sq_buf_size
335 static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
341 if (cq->sq.count > 0) {
348 if (!cq->num_sq_entries || !cq->sq_buf_size) {
353 cq->sq.next_to_use = 0;
354 cq->sq.next_to_clean = 0;
357 ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
362 ret_code = ice_alloc_sq_bufs(hw, cq);
367 ret_code = ice_cfg_sq_regs(hw, cq);
372 cq->sq.count = cq->num_sq_entries;
376 ICE_FREE_CQ_BUFS(hw, cq, sq);
377 ice_free_cq_ring(hw, &cq->sq);
386 * @cq: pointer to the specific Control queue
390 * in the cq structure:
391 * - cq->num_rq_entries
392 * - cq->rq_buf_size
397 static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
403 if (cq->rq.count > 0) {
410 if (!cq->num_rq_entries || !cq->rq_buf_size) {
415 cq->rq.next_to_use = 0;
416 cq->rq.next_to_clean = 0;
419 ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
424 ret_code = ice_alloc_rq_bufs(hw, cq);
429 ret_code = ice_cfg_rq_regs(hw, cq);
434 cq->rq.count = cq->num_rq_entries;
438 ICE_FREE_CQ_BUFS(hw, cq, rq);
439 ice_free_cq_ring(hw, &cq->rq);
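
ice_init_sq() and ice_init_rq() share one sequence: refuse to touch an already-initialized queue, require the caller to have set the entry count and buffer size, reset the ring indices, allocate the ring and its buffers, program the registers, and only then mark the queue live by setting count; any failure unwinds what was built. A condensed sketch of that ordering, where struct queue and the helper stubs are stand-ins for the driver's types and functions:

#include <stdlib.h>

struct queue {
	int count;               /* nonzero once the queue is live */
	int num_entries;         /* caller must set before init */
	int buf_size;            /* caller must set before init */
	int next_to_use;
	int next_to_clean;
	void *ring;
	void *bufs;
};

/* Stubs standing in for the driver's ring/buffer/register helpers. */
static int alloc_ring(struct queue *q)
{
	q->ring = calloc(q->num_entries, 32);
	return q->ring ? 0 : -1;
}
static int alloc_bufs(struct queue *q)
{
	q->bufs = calloc(q->num_entries, q->buf_size);
	return q->bufs ? 0 : -1;
}
static int cfg_regs(struct queue *q)   { (void)q; return 0; }
static void free_bufs(struct queue *q) { free(q->bufs); q->bufs = NULL; }
static void free_ring(struct queue *q) { free(q->ring); q->ring = NULL; }

static int init_queue(struct queue *q)
{
	int err;

	if (q->count > 0)
		return -1;                  /* already initialized */
	if (!q->num_entries || !q->buf_size)
		return -1;                  /* caller did not configure the queue */

	q->next_to_use = 0;
	q->next_to_clean = 0;

	err = alloc_ring(q);                /* ice_alloc_ctrlq_*_ring() */
	if (err)
		return err;
	err = alloc_bufs(q);                /* ice_alloc_*_bufs() */
	if (err)
		goto err_free_ring;
	err = cfg_regs(q);                  /* ice_cfg_*_regs() */
	if (err)
		goto err_free_bufs;

	q->count = q->num_entries;          /* success: queue is now live */
	return 0;

err_free_bufs:
	free_bufs(q);
err_free_ring:
	free_ring(q);
	return err;
}
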
448 * @cq: pointer to the specific Control queue
453 ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
459 ice_acquire_lock(&cq->sq_lock);
461 if (!cq->sq.count) {
467 wr32(hw, cq->sq.head, 0);
468 wr32(hw, cq->sq.tail, 0);
469 wr32(hw, cq->sq.len, 0);
470 wr32(hw, cq->sq.bal, 0);
471 wr32(hw, cq->sq.bah, 0);
473 cq->sq.count = 0; /* to indicate uninitialized queue */
476 ICE_FREE_CQ_BUFS(hw, cq, sq);
477 ice_free_cq_ring(hw, &cq->sq);
480 ice_release_lock(&cq->sq_lock);
519 * @cq: pointer to the specific Control queue
524 ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
530 ice_acquire_lock(&cq->rq_lock);
532 if (!cq->rq.count) {
538 wr32(hw, cq->rq.head, 0);
539 wr32(hw, cq->rq.tail, 0);
540 wr32(hw, cq->rq.len, 0);
541 wr32(hw, cq->rq.bal, 0);
542 wr32(hw, cq->rq.bah, 0);
545 cq->rq.count = 0;
548 ICE_FREE_CQ_BUFS(hw, cq, rq);
549 ice_free_cq_ring(hw, &cq->rq);
552 ice_release_lock(&cq->rq_lock);
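
Shutdown is the mirror image: under the queue lock, bail out if the queue was never brought up, zero the head/tail/length/base registers so hardware stops using the ring, clear count to mark it uninitialized, then free the buffers and ring. A sketch of that ordering, with a pthread mutex and plain struct fields standing in for the driver's lock and register writes:

#include <pthread.h>
#include <stdlib.h>

struct shut_queue {
	pthread_mutex_t lock;
	int count;                               /* 0 means uninitialized */
	unsigned int head, tail, len, bal, bah;  /* stand-ins for the registers */
	void *ring;
	void *bufs;
};

static void shutdown_queue(struct shut_queue *q)
{
	pthread_mutex_lock(&q->lock);            /* ice_acquire_lock() */

	if (!q->count)
		goto out;                        /* never initialized */

	/* Quiesce the hardware view of the ring before freeing its memory. */
	q->head = q->tail = q->len = q->bal = q->bah = 0;

	q->count = 0;                            /* mark the queue uninitialized */

	free(q->bufs);                           /* ICE_FREE_CQ_BUFS() */
	q->bufs = NULL;
	free(q->ring);                           /* ice_free_cq_ring() */
	q->ring = NULL;

out:
	pthread_mutex_unlock(&q->lock);          /* ice_release_lock() */
}
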
559 * @cq: pointer to the specific Control queue
561 void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
563 wr32(hw, cq->sq.len, 0);
564 wr32(hw, cq->rq.len, 0);
575 struct ice_ctl_q_info *cq = &hw->adminq;
592 ice_shutdown_rq(hw, cq);
593 ice_shutdown_sq(hw, cq);
603 * in the cq structure:
604 * - cq->num_sq_entries
605 * - cq->num_rq_entries
606 * - cq->rq_buf_size
607 * - cq->sq_buf_size
613 struct ice_ctl_q_info *cq;
621 cq = &hw->adminq;
625 cq = &hw->mailboxq;
630 cq->qtype = q_type;
633 if (!cq->num_rq_entries || !cq->num_sq_entries ||
634 !cq->rq_buf_size || !cq->sq_buf_size) {
639 cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
642 ret_code = ice_init_sq(hw, cq);
647 ret_code = ice_init_rq(hw, cq);
655 ice_shutdown_sq(hw, cq);
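
ice_init_ctrlq() first selects the queue pair for the requested type (firmware admin queue or PF/VF mailbox), checks that both entry counts and both buffer sizes were filled in, then brings up the send queue followed by the receive queue, shutting the SQ back down if the RQ fails. A sketch of that selection and rollback; the enum values and helper stubs are illustrative, not the driver's identifiers:

enum ctlq_type { CTLQ_ADMIN, CTLQ_MAILBOX };

struct ctlq {
	int num_sq_entries, num_rq_entries;
	int sq_buf_size, rq_buf_size;
};
struct hw_ctx { struct ctlq adminq, mailboxq; };

/* Stubs standing in for ice_init_sq(), ice_init_rq() and ice_shutdown_sq(). */
static int init_sq(struct ctlq *cq)      { (void)cq; return 0; }
static int init_rq(struct ctlq *cq)      { (void)cq; return 0; }
static void shutdown_sq(struct ctlq *cq) { (void)cq; }

static int init_ctrlq(struct hw_ctx *hw, enum ctlq_type type)
{
	struct ctlq *cq;
	int err;

	switch (type) {
	case CTLQ_ADMIN:
		cq = &hw->adminq;        /* firmware admin queue */
		break;
	case CTLQ_MAILBOX:
		cq = &hw->mailboxq;      /* PF/VF mailbox */
		break;
	default:
		return -1;
	}

	/* Both directions must be configured by the caller before init. */
	if (!cq->num_sq_entries || !cq->num_rq_entries ||
	    !cq->sq_buf_size || !cq->rq_buf_size)
		return -1;

	err = init_sq(cq);
	if (err)
		return err;

	err = init_rq(cq);
	if (err)
		shutdown_sq(cq);         /* roll the send queue back on failure */

	return err;
}
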
671 struct ice_ctl_q_info *cq;
677 cq = &hw->adminq;
678 if (ice_check_sq_alive(hw, cq))
682 cq = &hw->mailboxq;
688 ice_shutdown_sq(hw, cq);
689 ice_shutdown_rq(hw, cq);
715 * in the cq structure for all control queues:
716 * - cq->num_sq_entries
717 * - cq->num_rq_entries
718 * - cq->rq_buf_size
719 * - cq->sq_buf_size
753 * @cq: pointer to the control queue
757 static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
759 ice_init_lock(&cq->sq_lock);
760 ice_init_lock(&cq->rq_lock);
768 * in the cq structure for all control queues:
769 * - cq->num_sq_entries
770 * - cq->num_rq_entries
771 * - cq->rq_buf_size
772 * - cq->sq_buf_size
789 * @cq: pointer to the control queue
793 static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
795 ice_destroy_lock(&cq->sq_lock);
796 ice_destroy_lock(&cq->rq_lock);
820 * @cq: pointer to the specific Control queue
824 static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
826 struct ice_ctl_q_ring *sq = &cq->sq;
832 while (rd32(hw, cq->sq.head) != ntc) {
833 ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
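
ice_clean_sq() reclaims send descriptors by walking next_to_clean forward until it reaches the position reported by the hardware head register. A standalone sketch of that walk and of the resulting free-slot count (the arithmetic mirrors the driver's ICE_CTL_Q_DESC_UNUSED() helper, which keeps one slot reserved); hw_head stands in for the rd32() of cq->sq.head:

#include <stdint.h>

struct sq_ring {
	uint16_t count;          /* ring size, in descriptors */
	uint16_t next_to_use;    /* next slot software will fill */
	uint16_t next_to_clean;  /* oldest slot not yet reclaimed */
};

/* Advance next_to_clean up to the slot hardware has consumed (hw_head must
 * be a valid index below count), then report the number of free slots.
 */
static uint16_t clean_sq(struct sq_ring *sq, uint16_t hw_head)
{
	uint16_t ntc = sq->next_to_clean;

	while (hw_head != ntc) {
		/* the driver clears each reclaimed descriptor here */
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
	}
	sq->next_to_clean = ntc;

	/* Free slots: ring size minus the in-flight span, always keeping one
	 * slot unused so head == next_to_use unambiguously means "idle".
	 */
	return (uint16_t)((sq->next_to_clean > sq->next_to_use ? 0 : sq->count) +
			  sq->next_to_clean - sq->next_to_use - 1);
}
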
869 * @cq: pointer to the specific Control queue
878 ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
894 ice_ctl_q_str(cq->qtype), response ? "Response" : "Command",
921 * @cq: pointer to the specific Control queue
926 bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
931 return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
937 * @cq: pointer to the specific Control queue
948 ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
964 cq->sq_last_status = ICE_AQ_RC_OK;
966 if (!cq->sq.count) {
978 if (buf_size > cq->sq_buf_size) {
990 val = rd32(hw, cq->sq.head);
991 if (val >= cq->num_sq_entries) {
1003 if (ice_clean_sq(hw, cq) == 0) {
1010 desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);
1018 dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
1034 ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false);
1036 (cq->sq.next_to_use)++;
1037 if (cq->sq.next_to_use == cq->sq.count)
1038 cq->sq.next_to_use = 0;
1039 wr32(hw, cq->sq.tail, cq->sq.next_to_use);
1048 if (ice_sq_done(hw, cq))
1053 } while (total_delay < cq->sq_cmd_timeout);
1056 if (ice_sq_done(hw, cq)) {
1084 cq->sq_last_status = (enum ice_aq_err)retval;
1088 ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true);
1097 if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
1098 rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
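
The send path ties these pieces together: validate the queue and buffer size, reclaim completed slots, copy the descriptor (and any indirect buffer) into the slot at next_to_use, advance next_to_use with wraparound, write the tail register to hand the slot to firmware, then poll ice_sq_done() until the head catches up or the timeout expires. A condensed, self-contained sketch of the posting and polling portion; the DMA buffer handling, descriptor flags, and the critical-error register checks at 1097-1098 are deliberately omitted, and all names here are stand-ins:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define RING_SIZE 16

struct send_ring {
	uint8_t desc[RING_SIZE][32];   /* descriptor slots, 32 bytes each */
	uint16_t next_to_use;
	volatile uint16_t hw_head;     /* stand-in for the head register */
	volatile uint16_t hw_tail;     /* stand-in for the tail register */
};

/* Completion test: hardware is done when its head has caught up with the
 * last slot software posted (mirrors ice_sq_done()).
 */
static bool sq_done(const struct send_ring *sq)
{
	return sq->hw_head == sq->next_to_use;
}

static int send_cmd(struct send_ring *sq, const void *cmd, size_t len,
		    unsigned int timeout_loops)
{
	unsigned int delay = 0;

	if (len > sizeof(sq->desc[0]))
		return -1;                /* command larger than a slot */

	/* Place the command in the current slot, then bump next_to_use with
	 * wraparound before ringing the doorbell.
	 */
	memcpy(sq->desc[sq->next_to_use], cmd, len);
	sq->next_to_use++;
	if (sq->next_to_use == RING_SIZE)
		sq->next_to_use = 0;
	sq->hw_tail = sq->next_to_use;    /* wr32(hw, cq->sq.tail, ...) */

	/* Poll for completion; the driver delays between iterations and
	 * bounds the loop with cq->sq_cmd_timeout.
	 */
	do {
		if (sq_done(sq))
			return 0;
		delay++;
	} while (delay < timeout_loops);

	return sq_done(sq) ? 0 : -1;      /* -1: firmware never completed */
}
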
1114 * @cq: pointer to the specific Control queue
1125 ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1135 ice_acquire_lock(&cq->sq_lock);
1136 status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
1137 ice_release_lock(&cq->sq_lock);
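
ice_sq_send_cmd() itself is only a locking wrapper: acquire the SQ lock, run the _nolock worker, release. A sketch of that pattern with a pthread mutex standing in for ice_acquire_lock()/ice_release_lock() and a stub worker:

#include <pthread.h>

static pthread_mutex_t sq_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stub standing in for the unlocked worker, ice_sq_send_cmd_nolock(). */
static int send_cmd_nolock(void *desc) { (void)desc; return 0; }

static int send_cmd_locked(void *desc)
{
	int status;

	pthread_mutex_lock(&sq_lock);      /* ice_acquire_lock(&cq->sq_lock) */
	status = send_cmd_nolock(desc);
	pthread_mutex_unlock(&sq_lock);    /* ice_release_lock(&cq->sq_lock) */

	return status;
}
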
1160 * @cq: pointer to the specific Control queue
1169 ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1172 u16 ntc = cq->rq.next_to_clean;
1186 ice_acquire_lock(&cq->rq_lock);
1188 if (!cq->rq.count) {
1195 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1204 desc = ICE_CTL_Q_DESC(cq->rq, ntc);
1218 ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
1222 ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true);
1227 bi = &cq->rq.r.rq_bi[ntc];
1231 if (cq->rq_buf_size > ICE_AQ_LG_BUF)
1238 wr32(hw, cq->rq.tail, ntc);
1241 if (ntc == cq->num_rq_entries)
1243 cq->rq.next_to_clean = ntc;
1244 cq->rq.next_to_use = ntu;
1250 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1251 *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
1254 ice_release_lock(&cq->rq_lock);
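
Receive cleanup works the ring from the other end: read the hardware head, and if next_to_clean has caught up there is nothing pending; otherwise copy the event out of its buffer, give the buffer and descriptor back to firmware by writing the old index to the tail register, advance next_to_clean with wraparound, and report how many events remain. A standalone sketch of that flow; the lock, descriptor flags, and large-buffer handling are omitted, and the pending arithmetic follows the expression at 1251:

#include <stdint.h>
#include <string.h>

#define RQ_SIZE    16
#define RQ_BUF_LEN 512

struct rq_state {
	uint8_t bufs[RQ_SIZE][RQ_BUF_LEN];   /* pre-posted receive buffers */
	uint16_t next_to_clean;
	volatile uint16_t hw_head;           /* stand-in for the head register */
	volatile uint16_t hw_tail;           /* stand-in for the tail register */
};

/* Returns 0 and copies one event into msg_buf when something is pending,
 * -1 when the queue is empty. *pending reports how many events remain.
 */
static int clean_rq_elem(struct rq_state *rq, void *msg_buf, uint16_t buf_len,
			 uint16_t *pending)
{
	uint16_t ntc = rq->next_to_clean;
	uint16_t ntu = rq->hw_head;          /* rd32(hw, cq->rq.head) & head_mask */

	if (ntu == ntc)
		return -1;                   /* firmware has posted nothing new */

	/* Hand the completed message to the caller, then give the buffer and
	 * its descriptor slot back to firmware by bumping the tail.
	 */
	memcpy(msg_buf, rq->bufs[ntc], buf_len < RQ_BUF_LEN ? buf_len : RQ_BUF_LEN);
	rq->hw_tail = ntc;                   /* wr32(hw, cq->rq.tail, ntc) */

	ntc++;
	if (ntc == RQ_SIZE)
		ntc = 0;
	rq->next_to_clean = ntc;

	/* Pending = entries between next_to_clean and the re-read head,
	 * accounting for wraparound, as in the driver's *pending computation.
	 */
	ntu = rq->hw_head;
	*pending = (uint16_t)((ntc > ntu ? RQ_SIZE : 0) + (ntu - ntc));

	return 0;
}
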