Lines Matching refs:queues
105 struct nvme_rdma_queue *queues;
161 return queue - queue->ctrl->queues;
300 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
323 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
335 struct nvme_rdma_queue *queue = &ctrl->queues[0];
468 * Spread I/O queues completion vectors according to their queue index.
469 * Admin queues can always go on completion vector 0.
473 /* Polling queues need direct cq polling context */
576 queue = &ctrl->queues[idx];
665 nvme_rdma_free_queue(&ctrl->queues[i]);
673 nvme_rdma_stop_queue(&ctrl->queues[i]);
678 struct nvme_rdma_queue *queue = &ctrl->queues[idx];
712 nvme_rdma_stop_queue(&ctrl->queues[i]);
729 "unable to set any I/O queues\n");
735 "creating %d I/O queues.\n", nr_io_queues);
749 nvme_rdma_free_queue(&ctrl->queues[i]);
777 nvme_rdma_free_queue(&ctrl->queues[0]);
790 ctrl->device = ctrl->queues[0].device;
848 nvme_rdma_stop_queue(&ctrl->queues[0]);
860 nvme_rdma_free_queue(&ctrl->queues[0]);
879 * Only start IO queues for which we have allocated the tagset
880 * and limited it to the available queues. On reconnects, the
907 * If the number of queues has increased (reconnect case)
908 * start all new queues now.
935 nvme_rdma_stop_queue(&ctrl->queues[0]);
981 kfree(ctrl->queues);
1095 nvme_rdma_stop_queue(&ctrl->queues[0]);
1670 struct nvme_rdma_queue *queue = &ctrl->queues[0];
2292 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2294 if (!ctrl->queues)
2325 kfree(ctrl->queues);