Lines Matching defs:aq (struct efa_com_admin_queue in the Linux EFA RDMA driver, drivers/infiniband/hw/efa/efa_com.c)

126 struct efa_com_admin_queue *aq = &edev->aq;
127 struct efa_com_admin_sq *sq = &aq->sq;
128 u16 size = aq->depth * sizeof(*sq->entries);
134 dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL);
152 EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_DEPTH, aq->depth);
163 struct efa_com_admin_queue *aq = &edev->aq;
164 struct efa_com_admin_cq *cq = &aq->cq;
165 u16 size = aq->depth * sizeof(*cq->entries);
171 dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL);
186 EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_DEPTH, aq->depth);
190 aq->msix_vector_idx);
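
Lines 126-190 are the submission-queue and completion-queue setup: each ring is depth * sizeof(entry) bytes of DMA-coherent memory, and the depth is advertised to the device through the AQ_CAPS/ACQ_CAPS registers. The index arithmetic later in the file (aq->depth - 1 used as a mask) only works when the depth is a power of two. A minimal userspace sketch of the sizing arithmetic; the EFA_ADMIN_QUEUE_DEPTH value and the entry layout here are stand-ins, not the driver's definitions:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define EFA_ADMIN_QUEUE_DEPTH 16	/* stand-in; must be a power of two */

	struct efa_admin_aq_entry { uint8_t raw[64]; };	/* placeholder entry layout */

	int main(void)
	{
		uint16_t depth = EFA_ADMIN_QUEUE_DEPTH;

		/* The masking used later, pc & (depth - 1), requires this. */
		assert(depth && (depth & (depth - 1)) == 0);

		/* Same arithmetic as "size = aq->depth * sizeof(*sq->entries)". */
		size_t size = depth * sizeof(struct efa_admin_aq_entry);
		printf("ring bytes: %zu\n", size);
		return 0;
	}
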
244 static u16 efa_com_alloc_ctx_id(struct efa_com_admin_queue *aq)
248 spin_lock(&aq->comp_ctx_lock);
249 ctx_id = aq->comp_ctx_pool[aq->comp_ctx_pool_next];
250 aq->comp_ctx_pool_next++;
251 spin_unlock(&aq->comp_ctx_lock);
256 static void efa_com_dealloc_ctx_id(struct efa_com_admin_queue *aq,
259 spin_lock(&aq->comp_ctx_lock);
260 aq->comp_ctx_pool_next--;
261 aq->comp_ctx_pool[aq->comp_ctx_pool_next] = ctx_id;
262 spin_unlock(&aq->comp_ctx_lock);
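
Lines 244-262 implement the free list of completion-context IDs as a LIFO stack: comp_ctx_pool holds the free IDs, comp_ctx_pool_next is the stack pointer, and comp_ctx_lock guards both. Allocation can never underflow because the avail_cmds semaphore (line 744, initialized to the queue depth) caps the number of IDs in flight. A userspace sketch with a pthread mutex standing in for the spinlock:

	#include <pthread.h>
	#include <stdint.h>

	#define DEPTH 16	/* stand-in for aq->depth */

	static uint16_t comp_ctx_pool[DEPTH];
	static uint16_t comp_ctx_pool_next;	/* index of the next free id */
	static pthread_mutex_t comp_ctx_lock = PTHREAD_MUTEX_INITIALIZER;

	static void pool_init(void)
	{
		/* Mirrors "aq->comp_ctx_pool[i] = i" at line 375: pool starts full. */
		for (uint16_t i = 0; i < DEPTH; i++)
			comp_ctx_pool[i] = i;
		comp_ctx_pool_next = 0;
	}

	static uint16_t alloc_ctx_id(void)
	{
		pthread_mutex_lock(&comp_ctx_lock);
		uint16_t id = comp_ctx_pool[comp_ctx_pool_next++];
		pthread_mutex_unlock(&comp_ctx_lock);
		return id;
	}

	static void dealloc_ctx_id(uint16_t id)
	{
		pthread_mutex_lock(&comp_ctx_lock);
		comp_ctx_pool[--comp_ctx_pool_next] = id;	/* push back, LIFO */
		pthread_mutex_unlock(&comp_ctx_lock);
	}

	int main(void)
	{
		pool_init();
		uint16_t id = alloc_ctx_id();
		dealloc_ctx_id(id);
		return 0;
	}
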
265 static inline void efa_com_put_comp_ctx(struct efa_com_admin_queue *aq,
270 u16 ctx_id = cmd_id & (aq->depth - 1);
272 ibdev_dbg(aq->efa_dev, "Put completion command_id %#x\n", cmd_id);
274 efa_com_dealloc_ctx_id(aq, ctx_id);
277 static struct efa_comp_ctx *efa_com_get_comp_ctx(struct efa_com_admin_queue *aq,
280 u16 ctx_id = cmd_id & (aq->depth - 1);
282 if (aq->comp_ctx[ctx_id].occupied && capture) {
284 aq->efa_dev,
291 aq->comp_ctx[ctx_id].occupied = 1;
292 ibdev_dbg(aq->efa_dev,
296 return &aq->comp_ctx[ctx_id];
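
Lines 265-296 recover the context slot from a command id by masking with depth - 1 and, when a new command captures the slot, set an occupied flag so that a duplicate completion for the same slot is detected rather than silently corrupting state. A sketch of that guard (names mirror the fragments; the stderr print stands in for the ibdev_err at line 284):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DEPTH 16	/* stand-in for aq->depth */

	struct comp_ctx { bool occupied; /* plus status, result buffer, ... */ };
	static struct comp_ctx comp_ctx[DEPTH];

	static struct comp_ctx *get_comp_ctx(uint16_t cmd_id, bool capture)
	{
		uint16_t ctx_id = cmd_id & (DEPTH - 1);	/* low bits select the slot */

		if (comp_ctx[ctx_id].occupied && capture) {
			fprintf(stderr, "slot %u already occupied\n", ctx_id);
			return NULL;	/* would indicate a driver bug */
		}
		if (capture)
			comp_ctx[ctx_id].occupied = true;
		return &comp_ctx[ctx_id];
	}

	int main(void)
	{
		get_comp_ctx(3, true);	/* first capture succeeds */
		/* Second capture of the same slot is rejected: exit 0 if the
		 * guard fires as expected. */
		return get_comp_ctx(3, true) ? 1 : 0;
	}
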
299 static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
312 queue_size_mask = aq->depth - 1;
313 pi = aq->sq.pc & queue_size_mask;
315 ctx_id = efa_com_alloc_ctx_id(aq);
319 cmd_id |= aq->sq.pc & ~queue_size_mask;
324 EFA_ADMIN_AQ_COMMON_DESC_PHASE, aq->sq.phase);
326 comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, true);
328 efa_com_dealloc_ctx_id(aq, ctx_id);
339 aqe = &aq->sq.entries[pi];
343 aq->sq.pc++;
344 atomic64_inc(&aq->stats.submitted_cmd);
346 if ((aq->sq.pc & queue_size_mask) == 0)
347 aq->sq.phase = !aq->sq.phase;
350 writel(aq->sq.pc, aq->sq.db_addr);
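
Lines 299-350 show the producer side of the ring. The 16-bit command id packs two fields: the low log2(depth) bits are the context slot and the remaining high bits are the producer counter, so the consumer can both find the slot and spot stale ids. The phase bit flips each time the producer counter wraps the ring, and the doorbell write publishes the new producer counter to the device. A sketch of just that bookkeeping, assuming a power-of-two depth:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct sq_state {
		uint16_t depth;	/* power of two */
		uint16_t pc;	/* producer counter, monotonically increasing */
		bool phase;	/* flips on every ring wraparound */
	};

	static uint16_t submit(struct sq_state *sq, uint16_t ctx_id)
	{
		uint16_t mask = sq->depth - 1;
		uint16_t pi = sq->pc & mask;	/* ring index to write */

		/* Low bits: context slot. High bits: producer counter epoch. */
		uint16_t cmd_id = ctx_id;
		cmd_id |= sq->pc & ~mask;

		/* The descriptor would be written to entries[pi] here, tagged
		 * with cmd_id and the current phase bit. */
		(void)pi;

		sq->pc++;
		if ((sq->pc & mask) == 0)	/* wrapped: flip the phase */
			sq->phase = !sq->phase;

		/* writel(sq->pc, db_addr) would ring the doorbell here. */
		return cmd_id;
	}

	int main(void)
	{
		struct sq_state sq = { .depth = 16, .pc = 0, .phase = true };

		/* ctx_id would come from the pool; sequential ids for the demo. */
		for (uint16_t i = 0; i < 20; i++) {
			uint16_t cmd_id = submit(&sq, i % 16);
			printf("cmd_id %#x phase %d\n", cmd_id, sq.phase);
		}
		return 0;
	}

The completion side undoes the encoding with cmd_id & (aq->depth - 1), as line 280 shows.
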
355 static inline int efa_com_init_comp_ctxt(struct efa_com_admin_queue *aq)
357 size_t pool_size = aq->depth * sizeof(*aq->comp_ctx_pool);
358 size_t size = aq->depth * sizeof(struct efa_comp_ctx);
362 aq->comp_ctx = devm_kzalloc(aq->dmadev, size, GFP_KERNEL);
363 aq->comp_ctx_pool = devm_kzalloc(aq->dmadev, pool_size, GFP_KERNEL);
364 if (!aq->comp_ctx || !aq->comp_ctx_pool) {
365 devm_kfree(aq->dmadev, aq->comp_ctx_pool);
366 devm_kfree(aq->dmadev, aq->comp_ctx);
370 for (i = 0; i < aq->depth; i++) {
371 comp_ctx = efa_com_get_comp_ctx(aq, i, false);
375 aq->comp_ctx_pool[i] = i;
378 spin_lock_init(&aq->comp_ctx_lock);
380 aq->comp_ctx_pool_next = 0;
385 static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
393 spin_lock(&aq->sq.lock);
394 if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) {
395 ibdev_err_ratelimited(aq->efa_dev, "Admin queue is closed\n");
396 spin_unlock(&aq->sq.lock);
400 comp_ctx = __efa_com_submit_admin_cmd(aq, cmd, cmd_size_in_bytes, comp,
402 spin_unlock(&aq->sq.lock);
404 clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
409 static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
418 comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false);
420 ibdev_err(aq->efa_dev,
422 clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
429 if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
433 static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
441 queue_size_mask = aq->depth - 1;
443 ci = aq->cq.cc & queue_size_mask;
444 phase = aq->cq.phase;
446 cqe = &aq->cq.entries[ci];
456 efa_com_handle_single_admin_completion(aq, cqe);
460 if (ci == aq->depth) {
465 cqe = &aq->cq.entries[ci];
468 aq->cq.cc += comp_num;
469 aq->cq.phase = phase;
470 aq->sq.cc += comp_num;
471 atomic64_add(comp_num, &aq->stats.completed_cmd);
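
Lines 433-471 are the consumer loop: it keeps handling CQ entries for as long as the entry's phase bit matches the expected phase, flipping the expected phase on wraparound. That is how a ring with no device-visible consumer index tells fresh entries from stale ones. Each handled completion also retires one SQ slot (aq->sq.cc += comp_num at line 470). A sketch, with a plain bool standing in for the device-written phase bit:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct cq_entry { bool phase; /* plus the completion payload */ };

	struct cq_state {
		struct cq_entry *entries;
		uint16_t depth;	/* power of two */
		uint16_t cc;	/* consumer counter */
		bool phase;	/* phase expected for a fresh entry */
	};

	static unsigned int reap(struct cq_state *cq)
	{
		uint16_t mask = cq->depth - 1;
		uint16_t ci = cq->cc & mask;
		bool phase = cq->phase;
		unsigned int comp_num = 0;

		while (cq->entries[ci].phase == phase) {
			/* The driver issues a read barrier here before touching
			 * the rest of the entry, then dispatches it to
			 * efa_com_handle_single_admin_completion(). */
			comp_num++;
			ci++;
			if (ci == cq->depth) {	/* wraparound: restart, flip phase */
				ci = 0;
				phase = !phase;
			}
		}

		cq->cc += comp_num;
		cq->phase = phase;
		return comp_num;
	}

	int main(void)
	{
		struct cq_entry e[4] = { { true }, { true }, { false }, { false } };
		struct cq_state cq = { e, 4, 0, true };

		printf("reaped %u completions\n", reap(&cq));	/* prints 2 */
		return 0;
	}
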
494 struct efa_com_admin_queue *aq)
500 timeout = jiffies + usecs_to_jiffies(aq->completion_timeout);
503 spin_lock_irqsave(&aq->cq.lock, flags);
504 efa_com_handle_admin_completion(aq);
505 spin_unlock_irqrestore(&aq->cq.lock, flags);
512 aq->efa_dev,
515 atomic64_inc(&aq->stats.no_completion);
517 clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
522 msleep(aq->poll_interval);
527 efa_com_put_comp_ctx(aq, comp_ctx);
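
Lines 494-527 are the polling wait: under the CQ lock the driver reaps completions, then checks whether this command's context has completed; if completion_timeout (kept in microseconds) elapses first, it bumps the no_completion stat and marks the queue not running, otherwise it sleeps poll_interval milliseconds and retries. A userspace sketch of the same shape, with clock_gettime in place of jiffies and a stub in place of the CQ reaping:

	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	#define COMPLETION_TIMEOUT_US	3000000u	/* stand-in value */
	#define POLL_INTERVAL_MS	100u

	static int rounds;

	static bool reap_and_check_done(void)
	{
		/* Stands in for: lock cq, handle_admin_completion(), unlock,
		 * then test comp_ctx->status. Pretend round 3 completes. */
		return ++rounds == 3;
	}

	static int wait_polling(void)
	{
		struct timespec start, now;

		clock_gettime(CLOCK_MONOTONIC, &start);
		for (;;) {
			if (reap_and_check_done())
				return 0;

			clock_gettime(CLOCK_MONOTONIC, &now);
			unsigned long elapsed_us =
				(now.tv_sec - start.tv_sec) * 1000000ul +
				(now.tv_nsec - start.tv_nsec) / 1000;
			if (elapsed_us > COMPLETION_TIMEOUT_US)
				return -1;	/* the driver returns -ETIME */

			usleep(POLL_INTERVAL_MS * 1000);	/* msleep() analogue */
		}
	}

	int main(void)
	{
		printf("wait_polling -> %d\n", wait_polling());
		return 0;
	}
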
532 struct efa_com_admin_queue *aq)
538 usecs_to_jiffies(aq->completion_timeout));
547 spin_lock_irqsave(&aq->cq.lock, flags);
548 efa_com_handle_admin_completion(aq);
549 spin_unlock_irqrestore(&aq->cq.lock, flags);
551 atomic64_inc(&aq->stats.no_completion);
555 aq->efa_dev,
559 comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
562 aq->efa_dev,
566 comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
568 clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
575 efa_com_put_comp_ctx(aq, comp_ctx);
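
Lines 532-575 are the interrupt-mode wait: the caller blocks on the context's completion object for up to completion_timeout microseconds; if the wait times out, the driver reaps the CQ once by hand under the CQ lock in case the interrupt was lost, then logs one of two errors (completion arrived but the interrupt was missed, or no completion at all, both with the pc/cc counters) and marks the queue not running. A userspace sketch of that shape, with a pthread condvar standing in for the kernel completion:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static bool done;

	/* The "interrupt" path: the IRQ handler reaps the CQ and signals. */
	static void *irq_thread(void *arg)
	{
		(void)arg;
		usleep(10000);
		pthread_mutex_lock(&lock);
		done = true;	/* complete() analogue */
		pthread_cond_signal(&cond);
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	static int wait_interrupts(unsigned int timeout_us)
	{
		struct timespec abs;

		clock_gettime(CLOCK_REALTIME, &abs);
		abs.tv_sec += timeout_us / 1000000;
		abs.tv_nsec += (timeout_us % 1000000) * 1000;
		if (abs.tv_nsec >= 1000000000) {
			abs.tv_sec++;
			abs.tv_nsec -= 1000000000;
		}

		pthread_mutex_lock(&lock);
		while (!done) {
			if (pthread_cond_timedwait(&cond, &lock, &abs)) {
				/* Timed out: the driver now reaps the CQ once
				 * by hand in case the interrupt was lost. It
				 * returns -ETIME either way, logging whether
				 * the completion did arrive late. */
				break;
			}
		}
		int err = done ? 0 : -1;
		pthread_mutex_unlock(&lock);
		return err;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, irq_thread, NULL);
		printf("wait_interrupts -> %d\n", wait_interrupts(1000000));
		pthread_join(t, NULL);
		return 0;
	}
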
588 struct efa_com_admin_queue *aq)
590 if (test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
591 return efa_com_wait_and_process_admin_cq_polling(comp_ctx, aq);
593 return efa_com_wait_and_process_admin_cq_interrupts(comp_ctx, aq);
598 * @aq: admin queue.
609 int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
621 down(&aq->avail_cmds);
623 ibdev_dbg(aq->efa_dev, "%s (opcode %d)\n",
626 comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size);
629 aq->efa_dev,
634 up(&aq->avail_cmds);
635 atomic64_inc(&aq->stats.cmd_err);
639 err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
642 aq->efa_dev,
647 atomic64_inc(&aq->stats.cmd_err);
650 up(&aq->avail_cmds);
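
Lines 598-650 (efa_com_cmd_exec) bracket every command with down()/up() on avail_cmds, a semaphore initialized to the queue depth at line 744. That caps in-flight commands at the ring size, which is also why the ctx-id pool can never be empty at allocation time. A userspace sketch with POSIX semaphores standing in for the kernel ones:

	#include <semaphore.h>
	#include <stdio.h>

	#define DEPTH 16	/* stand-in for aq->depth */

	static sem_t avail_cmds;

	static int cmd_exec(void)
	{
		sem_wait(&avail_cmds);	/* down(): block if DEPTH commands in flight */

		/* ... submit_admin_cmd(), then wait_and_process_admin_cq() ... */
		int err = 0;

		sem_post(&avail_cmds);	/* up(): release the slot */
		return err;
	}

	int main(void)
	{
		sem_init(&avail_cmds, 0, DEPTH);	/* sema_init() analogue */
		printf("cmd_exec -> %d\n", cmd_exec());
		sem_destroy(&avail_cmds);
		return 0;
	}
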
661 struct efa_com_admin_queue *aq = &edev->aq;
663 struct efa_com_admin_cq *cq = &aq->cq;
664 struct efa_com_admin_sq *sq = &aq->sq;
667 clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
669 devm_kfree(edev->dmadev, aq->comp_ctx_pool);
670 devm_kfree(edev->dmadev, aq->comp_ctx);
672 size = aq->depth * sizeof(*sq->entries);
675 size = aq->depth * sizeof(*cq->entries);
698 set_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
700 clear_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
705 atomic64_t *s = (atomic64_t *)&edev->aq.stats;
708 for (i = 0; i < sizeof(edev->aq.stats) / sizeof(*s); i++, s++)
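
Lines 705-708 reset the statistics by casting the stats struct to a flat array of atomic64_t and walking sizeof(stats) / sizeof(*s) elements. The cast is only sound while every member of the struct is an atomic64_t. A sketch of the pattern, with plain uint64_t standing in for atomic64_t and the field names taken from the stats touched elsewhere in the listing:

	#include <stdint.h>
	#include <stdio.h>

	struct aq_stats {	/* every field must share the element type */
		uint64_t submitted_cmd;
		uint64_t completed_cmd;
		uint64_t cmd_err;
		uint64_t no_completion;
	};

	int main(void)
	{
		struct aq_stats stats = { 1, 2, 3, 4 };
		uint64_t *s = (uint64_t *)&stats;

		for (size_t i = 0; i < sizeof(stats) / sizeof(*s); i++, s++)
			*s = 0;	/* atomic64_set(s, 0) in the driver */

		printf("cmd_err after reset: %llu\n",
		       (unsigned long long)stats.cmd_err);
		return 0;
	}
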
725 struct efa_com_admin_queue *aq = &edev->aq;
738 aq->depth = EFA_ADMIN_QUEUE_DEPTH;
740 aq->dmadev = edev->dmadev;
741 aq->efa_dev = edev->efa_dev;
742 set_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state);
744 sema_init(&aq->avail_cmds, aq->depth);
748 err = efa_com_init_comp_ctxt(aq);
770 aq->completion_timeout = timeout * 100000;
772 aq->completion_timeout = ADMIN_CMD_TIMEOUT_US;
774 aq->poll_interval = EFA_POLL_INTERVAL_MS;
776 set_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
781 dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->cq.entries),
782 aq->cq.entries, aq->cq.dma_addr);
784 dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->sq.entries),
785 aq->sq.entries, aq->sq.dma_addr);
787 devm_kfree(edev->dmadev, aq->comp_ctx);
805 spin_lock_irqsave(&edev->aq.cq.lock, flags);
806 efa_com_handle_admin_completion(&edev->aq);
807 spin_unlock_irqrestore(&edev->aq.cq.lock, flags);
1086 edev->aq.completion_timeout = timeout * 100000;
1088 edev->aq.completion_timeout = ADMIN_CMD_TIMEOUT_US;
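
The two timeout fragments (lines 770-772 and 1086-1088) scale a device-reported value by 100000 before storing it in completion_timeout, which is kept in microseconds; the multiplier implies the register counts in 100 ms units, so a reported value of 3 becomes 3 * 100000 us = 300 ms. When the device reports 0, the compile-time default ADMIN_CMD_TIMEOUT_US is used instead.
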
1097 struct efa_com_admin_queue *aq = &edev->aq;
1112 err = efa_com_cmd_exec(aq,
1131 struct efa_com_admin_queue *aq = &edev->aq;
1139 err = efa_com_cmd_exec(aq,
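
The last fragments (lines 1097-1139) show the caller pattern: fetch &edev->aq once, build a command, and hand both the command and a completion buffer, with their sizes, to efa_com_cmd_exec. A self-contained sketch of that shape; every type here is a stand-in for the driver's admin ABI structs, and cmd_exec() fakes the device:

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	struct aq_entry { unsigned char raw[64]; };	/* command descriptor stand-in */
	struct acq_entry { unsigned char raw[64]; };	/* completion descriptor stand-in */
	struct admin_queue { int unused; };

	/* Stand-in for efa_com_cmd_exec(): submit, wait, copy the completion. */
	static int cmd_exec(struct admin_queue *aq,
			    struct aq_entry *cmd, size_t cmd_size,
			    struct acq_entry *comp, size_t comp_size)
	{
		(void)aq; (void)cmd; (void)cmd_size;
		memset(comp, 0, comp_size);	/* pretend the device answered */
		return 0;
	}

	int main(void)
	{
		struct admin_queue aq = { 0 };
		struct aq_entry cmd = { { 0 } };
		struct acq_entry resp;

		/* Callers pass generic entry pointers plus both sizes, as the
		 * efa_com_cmd_exec(aq, ...) calls at lines 1112 and 1139 do. */
		int err = cmd_exec(&aq, &cmd, sizeof(cmd), &resp, sizeof(resp));
		printf("err = %d\n", err);
		return err;
	}
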