Lines matching references to qpair; each entry shows the source line number followed by the matched line.

153 static inline bool nvme_qpair_is_admin_queue(struct nvme_qpair *qpair)
155 return qpair->id == 0;
158 static inline bool nvme_qpair_is_io_queue(struct nvme_qpair *qpair)
160 return qpair->id != 0;
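
Lines 153-160 appear in full above: queue pair 0 is the admin queue and any non-zero id is an I/O queue. A minimal self-contained restatement (the struct and function names below are stand-ins carrying only the id field, not the driver's full nvme_qpair definition):

#include <stdbool.h>

/* Stand-in for struct nvme_qpair; only the id field matters for these checks. */
struct qpair_id_view {
	unsigned int id;
};

/* Queue pair 0 is always the admin queue. */
static inline bool qpair_is_admin_queue(const struct qpair_id_view *qpair)
{
	return qpair->id == 0;
}

/* Every queue pair with a non-zero id is an I/O queue. */
static inline bool qpair_is_io_queue(const struct qpair_id_view *qpair)
{
	return qpair->id != 0;
}
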
178 static void nvme_qpair_admin_qpair_print_command(struct nvme_qpair *qpair,
183 qpair->id, cmd->cid,
187 static void nvme_qpair_io_qpair_print_command(struct nvme_qpair *qpair,
190 nvme_assert(qpair != NULL, "print_command: qpair == NULL\n");
200 qpair->id, cmd->cid, cmd->nsid,
208 qpair->id, cmd->cid, cmd->nsid);
213 cmd->opc, qpair->id, cmd->cid, cmd->nsid);
218 static void nvme_qpair_print_command(struct nvme_qpair *qpair,
221 nvme_assert(qpair != NULL, "qpair can not be NULL");
224 if (nvme_qpair_is_admin_queue(qpair))
225 return nvme_qpair_admin_qpair_print_command(qpair, cmd);
227 return nvme_qpair_io_qpair_print_command(qpair, cmd);
253 static void nvme_qpair_print_completion(struct nvme_qpair *qpair,
342 static void nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
351 nvme_debug("qpair %d: Submit command, tail %d, cid %d / %d\n",
352 qpair->id,
353 (int)qpair->sq_tail,
357 qpair->tr[tr->cid].active = true;
358 nvme_qpair_copy_command(&qpair->cmd[qpair->sq_tail], &req->cmd);
360 if (++qpair->sq_tail == qpair->entries)
361 qpair->sq_tail = 0;
364 nvme_mmio_write_4(qpair->sq_tdbl, qpair->sq_tail);
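
Lines 342-364 are the submission fast path: mark the tracker active, copy the command into the submission queue slot at sq_tail, advance the tail with wrap-around, and write the new tail to the SQ tail doorbell so the controller starts fetching. A compact sketch of that ring handling with simplified stand-in types (sq_sketch and sq_submit are illustrative names; the driver itself goes through nvme_qpair_copy_command() and nvme_mmio_write_4()):

#include <stdint.h>
#include <string.h>

struct nvme_sqe { uint8_t bytes[64]; };		/* 64-byte submission queue entry */

struct sq_sketch {
	struct nvme_sqe		*cmd;		/* submission queue ring */
	volatile uint32_t	*sq_tdbl;	/* mapped SQ tail doorbell register */
	uint32_t		sq_tail;
	uint32_t		entries;
};

/* Copy one command into the ring and notify the controller. */
static void sq_submit(struct sq_sketch *sq, const struct nvme_sqe *cmd)
{
	memcpy(&sq->cmd[sq->sq_tail], cmd, sizeof(*cmd));

	/* Advance the tail, wrapping at the end of the ring. */
	if (++sq->sq_tail == sq->entries)
		sq->sq_tail = 0;

	/*
	 * Ring the doorbell: the controller may start fetching commands
	 * up to (but not including) the new tail.
	 */
	*sq->sq_tdbl = sq->sq_tail;
}
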
367 static void nvme_qpair_complete_tracker(struct nvme_qpair *qpair,
377 qpair->tr[cpl->cid].active = false;
385 nvme_qpair_print_command(qpair, &req->cmd);
386 nvme_qpair_print_completion(qpair, cpl);
389 qpair->tr[cpl->cid].active = false;
397 nvme_qpair_submit_tracker(qpair, tr);
410 LIST_INSERT_HEAD(&qpair->free_tr, tr, list);
413 static void nvme_qpair_submit_queued_requests(struct nvme_qpair *qpair)
418 pthread_mutex_lock(&qpair->lock);
420 STAILQ_CONCAT(&req_queue, &qpair->queued_req);
427 while (!qpair->ctrlr->resetting && LIST_FIRST(&qpair->free_tr)
432 pthread_mutex_unlock(&qpair->lock);
433 nvme_qpair_submit_request(qpair, req);
434 pthread_mutex_lock(&qpair->lock);
437 STAILQ_CONCAT(&qpair->queued_req, &req_queue);
439 pthread_mutex_unlock(&qpair->lock);
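
Lines 413-439 drain the backlog of requests that could not get a tracker earlier. The pattern visible in the match is: splice the whole queued_req list onto a local list with STAILQ_CONCAT, resubmit entries one at a time while the controller is not resetting and a free tracker exists, dropping the qpair lock around each resubmission, then splice whatever is left back onto queued_req. A hedged sketch of that pattern (the stand-in types, the tracker_free flag, and the submit_one() stub are illustrative; it also assumes a sys/queue.h that provides STAILQ_CONCAT, as the driver's list macros do):

#include <pthread.h>
#include <stdbool.h>
#include <sys/queue.h>

struct request { STAILQ_ENTRY(request) stailq; };
STAILQ_HEAD(req_head, request);

struct qp_backlog {
	pthread_mutex_t	lock;
	struct req_head	queued_req;	/* requests waiting for a free tracker */
	bool		resetting;	/* stands in for ctrlr->resetting */
	bool		tracker_free;	/* stands in for LIST_FIRST(&free_tr) != NULL */
};

/* Illustrative stub; the driver calls nvme_qpair_submit_request() here. */
static void submit_one(struct qp_backlog *qp, struct request *req)
{
	(void)qp;
	(void)req;
}

static void drain_queued_requests(struct qp_backlog *qp)
{
	struct req_head local = STAILQ_HEAD_INITIALIZER(local);
	struct request *req;

	pthread_mutex_lock(&qp->lock);

	/* Splice the whole backlog onto a local list in one O(1) step. */
	STAILQ_CONCAT(&local, &qp->queued_req);

	while (!qp->resetting && qp->tracker_free && !STAILQ_EMPTY(&local)) {
		req = STAILQ_FIRST(&local);
		STAILQ_REMOVE_HEAD(&local, stailq);

		/* Drop the qpair lock around each resubmission. */
		pthread_mutex_unlock(&qp->lock);
		submit_one(qp, req);
		pthread_mutex_lock(&qp->lock);
	}

	/* Anything that could not be submitted goes back on the qpair. */
	STAILQ_CONCAT(&qp->queued_req, &local);

	pthread_mutex_unlock(&qp->lock);
}
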
442 static void nvme_qpair_manual_complete_tracker(struct nvme_qpair *qpair,
452 cpl.sqid = qpair->id;
458 nvme_qpair_complete_tracker(qpair, tr, &cpl, print_on_error);
461 static void nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
470 cpl.sqid = qpair->id;
477 nvme_qpair_print_command(qpair, &req->cmd);
478 nvme_qpair_print_completion(qpair, &cpl);
487 static void nvme_qpair_abort_aers(struct nvme_qpair *qpair)
491 tr = LIST_FIRST(&qpair->outstanding_tr);
496 nvme_qpair_manual_complete_tracker(qpair, tr,
500 tr = LIST_FIRST(&qpair->outstanding_tr);
507 static inline void _nvme_qpair_admin_qpair_destroy(struct nvme_qpair *qpair)
509 nvme_qpair_abort_aers(qpair);
512 static inline void _nvme_qpair_req_bad_phys(struct nvme_qpair *qpair,
519 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
527 static int _nvme_qpair_build_contig_request(struct nvme_qpair *qpair,
539 _nvme_qpair_req_bad_phys(qpair, tr);
552 _nvme_qpair_req_bad_phys(qpair, tr);
569 _nvme_qpair_req_bad_phys(qpair, tr);
583 static int _nvme_qpair_build_hw_sgl_request(struct nvme_qpair *qpair,
615 _nvme_qpair_req_bad_phys(qpair, tr);
622 _nvme_qpair_req_bad_phys(qpair, tr);
664 static int _nvme_qpair_build_prps_sgl_request(struct nvme_qpair *qpair,
694 _nvme_qpair_req_bad_phys(qpair, tr);
756 static void _nvme_qpair_admin_qpair_enable(struct nvme_qpair *qpair)
766 LIST_FOREACH_SAFE(tr, &qpair->outstanding_tr, list, tr_temp) {
768 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
773 qpair->enabled = true;
776 static void _nvme_qpair_io_qpair_enable(struct nvme_qpair *qpair)
781 qpair->enabled = true;
783 qpair->ctrlr->enabled_io_qpairs++;
786 while (!STAILQ_EMPTY(&qpair->queued_req)) {
787 req = STAILQ_FIRST(&qpair->queued_req);
788 STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
790 nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC,
796 LIST_FOREACH_SAFE(tr, &qpair->outstanding_tr, list, temp) {
798 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
804 static inline void _nvme_qpair_admin_qpair_disable(struct nvme_qpair *qpair)
806 qpair->enabled = false;
807 nvme_qpair_abort_aers(qpair);
810 static inline void _nvme_qpair_io_qpair_disable(struct nvme_qpair *qpair)
812 qpair->enabled = false;
814 qpair->ctrlr->enabled_io_qpairs--;
843 int nvme_qpair_construct(struct nvme_ctrlr *ctrlr, struct nvme_qpair *qpair,
857 pthread_mutex_init(&qpair->lock, NULL);
859 qpair->entries = entries;
860 qpair->trackers = trackers;
861 qpair->qprio = qprio;
862 qpair->sq_in_cmb = false;
863 qpair->ctrlr = ctrlr;
875 qpair->cmd = ctrlr->cmb_bar_virt_addr + offset;
876 qpair->cmd_bus_addr = ctrlr->cmb_bar_phys_addr + offset;
877 qpair->sq_in_cmb = true;
879 nvme_debug("Allocated qpair %d cmd in cmb at %p / 0x%llx\n",
880 qpair->id,
881 qpair->cmd, qpair->cmd_bus_addr);
886 if (qpair->sq_in_cmb == false) {
888 qpair->cmd =
891 (unsigned long *) &qpair->cmd_bus_addr);
892 if (!qpair->cmd) {
893 nvme_err("Allocate qpair commands failed\n");
896 memset(qpair->cmd, 0, sizeof(struct nvme_cmd) * entries);
898 nvme_debug("Allocated qpair %d cmd %p / 0x%llx\n",
899 qpair->id,
900 qpair->cmd, qpair->cmd_bus_addr);
903 qpair->cpl = nvme_mem_alloc_node(sizeof(struct nvme_cpl) * entries,
905 (unsigned long *) &qpair->cpl_bus_addr);
906 if (!qpair->cpl) {
907 nvme_err("Allocate qpair completions failed\n");
910 memset(qpair->cpl, 0, sizeof(struct nvme_cpl) * entries);
912 nvme_debug("Allocated qpair %d cpl at %p / 0x%llx\n",
913 qpair->id,
914 qpair->cpl,
915 qpair->cpl_bus_addr);
918 qpair->sq_tdbl = doorbell_base +
919 (2 * qpair->id + 0) * ctrlr->doorbell_stride_u32;
920 qpair->cq_hdbl = doorbell_base +
921 (2 * qpair->id + 1) * ctrlr->doorbell_stride_u32;
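
Lines 918-921 locate this queue pair's two doorbell registers. Per the NVMe register map, the submission queue tail doorbell of queue qid lives at doorbell index 2*qid and the completion queue head doorbell at index 2*qid + 1, each separated by the controller's doorbell stride; here the stride is pre-converted to 32-bit words so plain uint32_t pointer arithmetic works. A small self-contained restatement (qpair_doorbells is an illustrative name):

#include <stdint.h>

/*
 * Return pointers to a queue pair's doorbells given the doorbell base
 * (the first doorbell in the controller register space) and the doorbell
 * stride expressed in 32-bit words, mirroring lines 918-921.
 */
static void qpair_doorbells(volatile uint32_t *doorbell_base,
			    uint32_t doorbell_stride_u32, uint16_t qid,
			    volatile uint32_t **sq_tdbl,
			    volatile uint32_t **cq_hdbl)
{
	/* Submission queue tail doorbell: index 2*qid. */
	*sq_tdbl = doorbell_base + (2 * qid + 0) * doorbell_stride_u32;
	/* Completion queue head doorbell: index 2*qid + 1. */
	*cq_hdbl = doorbell_base + (2 * qid + 1) * doorbell_stride_u32;
}
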
923 LIST_INIT(&qpair->free_tr);
924 LIST_INIT(&qpair->outstanding_tr);
925 STAILQ_INIT(&qpair->free_req);
926 STAILQ_INIT(&qpair->queued_req);
929 if (nvme_request_pool_construct(qpair)) {
941 qpair->tr = nvme_mem_alloc_node(sizeof(struct nvme_tracker) * trackers,
944 if (!qpair->tr) {
948 memset(qpair->tr, 0, sizeof(struct nvme_tracker) * trackers);
950 nvme_debug("Allocated qpair %d trackers at %p / 0x%lx\n",
951 qpair->id, qpair->tr, phys_addr);
954 tr = &qpair->tr[i];
956 LIST_INSERT_HEAD(&qpair->free_tr, tr, list);
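
Lines 923-956 set up the per-qpair lists and the tracker pool: the tracker array is allocated and zeroed, then every tracker is pushed onto free_tr. A sketch of that initialization under simplified assumptions (the tracker layout and the cid = i assignment are not visible in the match above and are assumptions here):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/queue.h>

struct tracker {
	LIST_ENTRY(tracker)	list;
	uint16_t		cid;	/* command identifier */
	bool			active;
};
LIST_HEAD(tr_head, tracker);

/* Allocate the tracker array and thread every entry onto the free list. */
static struct tracker *alloc_trackers(struct tr_head *free_tr, unsigned int trackers)
{
	struct tracker *tr = calloc(trackers, sizeof(*tr));
	unsigned int i;

	if (!tr)
		return NULL;

	LIST_INIT(free_tr);
	for (i = 0; i < trackers; i++) {
		tr[i].cid = i;	/* assumed; the cid assignment is not shown above */
		LIST_INSERT_HEAD(free_tr, &tr[i], list);
	}

	return tr;
}
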
960 nvme_qpair_reset(qpair);
965 nvme_qpair_destroy(qpair);
970 void nvme_qpair_destroy(struct nvme_qpair *qpair)
972 if (!qpair->ctrlr)
975 if (nvme_qpair_is_admin_queue(qpair))
976 _nvme_qpair_admin_qpair_destroy(qpair);
978 if (qpair->cmd && !qpair->sq_in_cmb) {
979 nvme_free(qpair->cmd);
980 qpair->cmd = NULL;
982 if (qpair->cpl) {
983 nvme_free(qpair->cpl);
984 qpair->cpl = NULL;
986 if (qpair->tr) {
987 nvme_free(qpair->tr);
988 qpair->tr = NULL;
990 nvme_request_pool_destroy(qpair);
992 qpair->ctrlr = NULL;
994 pthread_mutex_destroy(&qpair->lock);
997 static bool nvme_qpair_enabled(struct nvme_qpair *qpair)
999 if (!qpair->enabled && !qpair->ctrlr->resetting)
1000 nvme_qpair_enable(qpair);
1002 return qpair->enabled;
1005 int nvme_qpair_submit_request(struct nvme_qpair *qpair,
1010 struct nvme_ctrlr *ctrlr = qpair->ctrlr;
1019 nvme_qpair_enabled(qpair);
1030 ret = nvme_qpair_submit_request(qpair, child_req);
1044 pthread_mutex_lock(&qpair->lock);
1046 tr = LIST_FIRST(&qpair->free_tr);
1047 if (tr == NULL || !qpair->enabled || !STAILQ_EMPTY(&qpair->queued_req)) {
1049 * No tracker is available, the qpair is disabled due
1053 * Put the request on the qpair's request queue to be
1057 STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
1058 pthread_mutex_unlock(&qpair->lock);
1061 nvme_qpair_submit_queued_requests(qpair);
1067 LIST_INSERT_HEAD(&qpair->outstanding_tr, tr, list);
1075 ret = _nvme_qpair_build_contig_request(qpair, req, tr);
1078 ret = _nvme_qpair_build_hw_sgl_request(qpair, req, tr);
1080 ret = _nvme_qpair_build_prps_sgl_request(qpair, req, tr);
1082 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
1089 nvme_qpair_submit_tracker(qpair, tr);
1091 pthread_mutex_unlock(&qpair->lock);
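
Lines 1005-1091 show the submission policy: a request is issued immediately only when a free tracker exists, the qpair is enabled, and nothing is already sitting on queued_req, so that deferred requests keep their original order; otherwise the request is appended to queued_req and resubmitted later by nvme_qpair_submit_queued_requests(). A hedged sketch of just that decision (stand-in types; the LIST_REMOVE and the lock-held success path reflect what the surrounding fragments imply rather than lines shown verbatim):

#include <pthread.h>
#include <stdbool.h>
#include <sys/queue.h>

struct io_req { STAILQ_ENTRY(io_req) stailq; };
STAILQ_HEAD(io_req_head, io_req);
struct io_tracker { LIST_ENTRY(io_tracker) list; };
LIST_HEAD(io_tr_head, io_tracker);

struct qp_view {
	pthread_mutex_t		lock;
	bool			enabled;
	struct io_tr_head	free_tr;
	struct io_req_head	queued_req;
};

/*
 * Issue immediately only when a tracker is free, the qpair is enabled and
 * nothing is already queued; otherwise defer the request so submission
 * order is preserved.  Returns the claimed tracker with the lock still
 * held (the caller builds the PRP/SGL list and submits before unlocking),
 * or NULL if the request was put on queued_req.
 */
static struct io_tracker *claim_tracker_or_queue(struct qp_view *qp, struct io_req *req)
{
	struct io_tracker *tr;

	pthread_mutex_lock(&qp->lock);

	tr = LIST_FIRST(&qp->free_tr);
	if (tr == NULL || !qp->enabled || !STAILQ_EMPTY(&qp->queued_req)) {
		STAILQ_INSERT_TAIL(&qp->queued_req, req, stailq);
		pthread_mutex_unlock(&qp->lock);
		return NULL;
	}

	/* The driver moves the tracker to its outstanding list at this point. */
	LIST_REMOVE(tr, list);
	return tr;
}
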
1100 unsigned int nvme_qpair_poll(struct nvme_qpair *qpair,
1107 if (!nvme_qpair_enabled(qpair))
1109 * qpair is not enabled, likely because a controller reset is
1117 (max_completions > (qpair->entries - 1U)))
1123 max_completions = qpair->entries - 1;
1125 pthread_mutex_lock(&qpair->lock);
1129 cpl = &qpair->cpl[qpair->cq_head];
1130 if (cpl->status.p != qpair->phase)
1133 tr = &qpair->tr[cpl->cid];
1135 nvme_qpair_complete_tracker(qpair, tr, cpl, true);
1138 nvme_qpair_print_completion(qpair, cpl);
1142 if (++qpair->cq_head == qpair->entries) {
1143 qpair->cq_head = 0;
1144 qpair->phase = !qpair->phase;
1152 nvme_mmio_write_4(qpair->cq_hdbl, qpair->cq_head);
1154 pthread_mutex_unlock(&qpair->lock);
1156 if (!STAILQ_EMPTY(&qpair->queued_req))
1157 nvme_qpair_submit_queued_requests(qpair);
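
Lines 1100-1157 are the completion path: walk the completion queue from cq_head, stop at the first entry whose phase tag does not match the expected phase (the controller has not written it yet), complete the matching tracker, wrap the head and flip the expected phase at the end of the ring, and finally write the new head to the CQ head doorbell. A self-contained sketch of the phase-tag loop (the completion layout is simplified to the two fields the loop needs, and handle_cpl() is an illustrative stub standing in for nvme_qpair_complete_tracker()):

#include <stdint.h>

/* Simplified completion entry: only the two fields the loop needs. */
struct cpl_sketch {
	uint16_t	cid;	/* command identifier of the completed command */
	uint8_t		phase;	/* phase tag written by the controller */
};

struct cq_sketch {
	struct cpl_sketch	*cpl;		/* completion queue ring */
	volatile uint32_t	*cq_hdbl;	/* mapped CQ head doorbell */
	uint32_t		cq_head;
	uint32_t		entries;
	uint8_t			phase;		/* expected phase, 1 after reset */
};

/* Illustrative stub; the driver completes the matching tracker here. */
static void handle_cpl(struct cq_sketch *cq, struct cpl_sketch *cpl)
{
	(void)cq;
	(void)cpl;
}

/* Consume completions until an entry still carrying the stale phase is hit. */
static unsigned int cq_poll(struct cq_sketch *cq, unsigned int max_completions)
{
	unsigned int n = 0;

	while (n < max_completions) {
		struct cpl_sketch *cpl = &cq->cpl[cq->cq_head];

		/* Entries the controller has not written yet keep the old phase. */
		if (cpl->phase != cq->phase)
			break;

		handle_cpl(cq, cpl);
		n++;

		/* Wrap the head and flip the expected phase each time around. */
		if (++cq->cq_head == cq->entries) {
			cq->cq_head = 0;
			cq->phase = !cq->phase;
		}
	}

	/* Tell the controller how far the host has consumed. */
	if (n)
		*cq->cq_hdbl = cq->cq_head;

	return n;
}
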
1162 void nvme_qpair_reset(struct nvme_qpair *qpair)
1164 pthread_mutex_lock(&qpair->lock);
1166 qpair->sq_tail = qpair->cq_head = 0;
1174 qpair->phase = 1;
1176 memset(qpair->cmd, 0, qpair->entries * sizeof(struct nvme_cmd));
1177 memset(qpair->cpl, 0, qpair->entries * sizeof(struct nvme_cpl));
1179 pthread_mutex_unlock(&qpair->lock);
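
Lines 1162-1179 return the rings to their initial state: both indices go back to zero, the expected phase is set to 1, and the command and completion arrays are cleared, which is what makes every freshly zeroed completion slot read as "not yet posted" to the poll loop above. A short sketch with stand-in types:

#include <pthread.h>
#include <stdint.h>
#include <string.h>

struct sqe_sketch { uint8_t bytes[64]; };	/* 64-byte SQ entry */
struct cqe_sketch { uint8_t bytes[16]; };	/* 16-byte CQ entry */

struct qp_rings {
	pthread_mutex_t		lock;
	struct sqe_sketch	*cmd;
	struct cqe_sketch	*cpl;
	uint32_t		sq_tail;
	uint32_t		cq_head;
	uint32_t		entries;
	uint8_t			phase;
};

/* Return both rings to their initial state, as lines 1162-1179 do. */
static void qp_reset(struct qp_rings *qp)
{
	pthread_mutex_lock(&qp->lock);

	qp->sq_tail = qp->cq_head = 0;

	/*
	 * The expected phase starts at 1: a zeroed completion array has
	 * phase 0 everywhere, so every slot reads as not yet posted.
	 */
	qp->phase = 1;

	memset(qp->cmd, 0, qp->entries * sizeof(*qp->cmd));
	memset(qp->cpl, 0, qp->entries * sizeof(*qp->cpl));

	pthread_mutex_unlock(&qp->lock);
}
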
1182 void nvme_qpair_enable(struct nvme_qpair *qpair)
1184 pthread_mutex_lock(&qpair->lock);
1186 if (nvme_qpair_is_io_queue(qpair))
1187 _nvme_qpair_io_qpair_enable(qpair);
1189 _nvme_qpair_admin_qpair_enable(qpair);
1191 pthread_mutex_unlock(&qpair->lock);
1194 void nvme_qpair_disable(struct nvme_qpair *qpair)
1196 pthread_mutex_lock(&qpair->lock);
1198 if (nvme_qpair_is_io_queue(qpair))
1199 _nvme_qpair_io_qpair_disable(qpair);
1201 _nvme_qpair_admin_qpair_disable(qpair);
1203 pthread_mutex_unlock(&qpair->lock);
1206 void nvme_qpair_fail(struct nvme_qpair *qpair)
1211 pthread_mutex_lock(&qpair->lock);
1213 while (!STAILQ_EMPTY(&qpair->queued_req)) {
1216 req = STAILQ_FIRST(&qpair->queued_req);
1217 STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
1218 nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC,
1225 while (!LIST_EMPTY(&qpair->outstanding_tr)) {
1232 tr = LIST_FIRST(&qpair->outstanding_tr);
1233 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
1239 pthread_mutex_unlock(&qpair->lock);