Lines matching defs:ctrlr

28 nvmft_printf(struct nvmft_controller *ctrlr, const char *fmt, ...)
38 sbuf_printf(&sb, "nvmft%u: ", ctrlr->cntlid);
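nvmft_printf() (lines 28-38) prefixes every controller message with "nvmft<cntlid>: " using sbuf(9) over a fixed stack buffer; the match filter elides most of its body. A minimal sketch of the pattern, assuming a 128-byte buffer and plain printf() output (the real function may drain differently):

    #include <sys/types.h>
    #include <sys/systm.h>
    #include <sys/sbuf.h>
    #include <machine/stdarg.h>

    /* Sketch only: build the prefix and the caller's message in a
     * fixed-size sbuf, then emit the finished string in one call. */
    static void
    ctrlr_printf_sketch(struct nvmft_controller *ctrlr, const char *fmt, ...)
    {
        char buf[128];          /* assumed size */
        struct sbuf sb;
        va_list ap;

        sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
        sbuf_printf(&sb, "nvmft%u: ", ctrlr->cntlid);
        va_start(ap, fmt);
        sbuf_vprintf(&sb, fmt, ap);
        va_end(ap);
        if (sbuf_finish(&sb) == 0)
            printf("%s", sbuf_data(&sb));
        sbuf_delete(&sb);
    }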
54 struct nvmft_controller *ctrlr;
56 ctrlr = malloc(sizeof(*ctrlr), M_NVMFT, M_WAITOK | M_ZERO);
57 ctrlr->cntlid = cntlid;
59 TAILQ_INSERT_TAIL(&np->controllers, ctrlr, link);
60 ctrlr->np = np;
61 mtx_init(&ctrlr->lock, "nvmft controller", NULL, MTX_DEF);
62 callout_init(&ctrlr->ka_timer, 1);
63 TASK_INIT(&ctrlr->shutdown_task, 0, nvmft_controller_shutdown, ctrlr);
64 TIMEOUT_TASK_INIT(taskqueue_thread, &ctrlr->terminate_task, 0,
65 nvmft_controller_terminate, ctrlr);
67 ctrlr->cdata = np->cdata;
68 ctrlr->cdata.ctrlr_id = htole16(cntlid);
69 memcpy(ctrlr->hostid, data->hostid, sizeof(ctrlr->hostid));
70 memcpy(ctrlr->hostnqn, data->hostnqn, sizeof(ctrlr->hostnqn));
71 ctrlr->hip.power_cycles[0] = 1;
72 ctrlr->create_time = sbinuptime();
74 ctrlr->changed_ns = malloc(sizeof(*ctrlr->changed_ns), M_NVMFT,
77 return (ctrlr);
81 nvmft_controller_free(struct nvmft_controller *ctrlr)
83 mtx_destroy(&ctrlr->lock);
84 MPASS(ctrlr->io_qpairs == NULL);
85 free(ctrlr->changed_ns, M_NVMFT);
86 free(ctrlr, M_NVMFT);
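Lines 56-74 are the standard bring-up sequence for a long-lived kernel object: a zeroed M_WAITOK allocation (cannot fail, leaves no uninitialized fields), list insertion, and mutex/callout/task initialization; nvmft_controller_free() (lines 83-86) is the mirror image. The same pattern in a generic, self-contained sketch (all names here are hypothetical):

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/lock.h>
    #include <sys/malloc.h>
    #include <sys/mutex.h>
    #include <sys/queue.h>
    #include <sys/callout.h>
    #include <sys/taskqueue.h>

    static MALLOC_DEFINE(M_EXAMPLE, "example", "example softc");

    struct ex_softc {
        TAILQ_ENTRY(ex_softc) link;
        struct mtx      lock;
        struct callout  timer;
        struct task     shutdown_task;
    };

    static TAILQ_HEAD(, ex_softc) ex_list = TAILQ_HEAD_INITIALIZER(ex_list);

    static void ex_shutdown(void *arg, int pending);

    static struct ex_softc *
    ex_alloc(void)
    {
        struct ex_softc *sc;

        /* M_ZERO: every field starts in a known state. */
        sc = malloc(sizeof(*sc), M_EXAMPLE, M_WAITOK | M_ZERO);
        mtx_init(&sc->lock, "ex softc", NULL, MTX_DEF);
        callout_init(&sc->timer, 1);    /* 1 = MP-safe handler */
        TASK_INIT(&sc->shutdown_task, 0, ex_shutdown, sc);
        TAILQ_INSERT_TAIL(&ex_list, sc, link);  /* publish last */
        return (sc);
    }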
92 struct nvmft_controller *ctrlr = arg;
95 if (ctrlr->shutdown)
98 traffic = atomic_readandclear_int(&ctrlr->ka_active_traffic);
100 nvmft_printf(ctrlr,
102 nvmft_controller_error(ctrlr, NULL, ETIMEDOUT);
106 callout_schedule_sbt(&ctrlr->ka_timer, ctrlr->ka_sbt, 0, C_HARDCLOCK);
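The keep-alive handler (lines 92-106) is a self-rescheduling callout: each pass consumes a traffic flag with atomic_readandclear_int(); reading zero means no command arrived during the entire interval, so the association is torn down with ETIMEDOUT, otherwise the callout rearms itself. The admin and I/O dispatch paths re-arm the flag on every command (atomic_store_int() at lines 954 and 1006). The pattern in a self-contained sketch (struct and function names are hypothetical):

    #include <sys/param.h>
    #include <sys/callout.h>
    #include <machine/atomic.h>

    struct ka_state {
        struct callout  timer;
        sbintime_t      interval;
        volatile u_int  active_traffic; /* set to 1 on every command */
        bool            shutdown;
    };

    static void
    ka_timer_fn(void *arg)
    {
        struct ka_state *ka = arg;

        if (ka->shutdown)
            return;

        /* Consume the flag; zero means the interval was silent. */
        if (atomic_readandclear_int(&ka->active_traffic) == 0) {
            /* ... tear down the association (ETIMEDOUT) ... */
            return;
        }

        /* C_HARDCLOCK batches the wakeup with the hardclock tick. */
        callout_schedule_sbt(&ka->timer, ka->interval, 0, C_HARDCLOCK);
    }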
115 struct nvmft_controller *ctrlr;
139 TAILQ_FOREACH(ctrlr, &np->controllers, link) {
140 KASSERT(ctrlr->cntlid != cntlid,
145 ctrlr = nvmft_controller_alloc(np, cntlid, data);
146 nvmft_printf(ctrlr, "associated with %.*s\n",
148 ctrlr->admin = qp;
149 ctrlr->trtype = handoff->trtype;
161 ctrlr->ka_sbt = mstosbt(roundup(kato, 1000));
162 callout_reset_sbt(&ctrlr->ka_timer, ctrlr->ka_sbt, 0,
163 nvmft_keep_alive_timer, ctrlr, C_HARDCLOCK);
166 nvmft_finish_accept(qp, cmd, ctrlr);
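Line 161 converts the host's Connect KATO (milliseconds) into the callout period, rounding up to a whole second, and lines 162-163 arm the timer. Presumably the arming sits behind a kato != 0 check, since a KATO of zero disables keep-alive; a reassembled fragment under that assumption:

    /* Hypothetical guard around the arming shown at lines 161-163. */
    if (kato != 0) {
        ctrlr->ka_sbt = mstosbt(roundup(kato, 1000));
        callout_reset_sbt(&ctrlr->ka_timer, ctrlr->ka_sbt, 0,
            nvmft_keep_alive_timer, ctrlr, C_HARDCLOCK);
    }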
178 struct nvmft_controller *ctrlr;
192 TAILQ_FOREACH(ctrlr, &np->controllers, link) {
193 if (ctrlr->cntlid == cntlid)
196 if (ctrlr == NULL) {
199 ctrlr->cntlid, qid, (int)sizeof(data->hostnqn),
207 if (memcmp(ctrlr->hostid, data->hostid, sizeof(ctrlr->hostid)) != 0) {
209 nvmft_printf(ctrlr,
217 if (memcmp(ctrlr->hostnqn, data->hostnqn, sizeof(ctrlr->hostnqn)) != 0) {
219 nvmft_printf(ctrlr,
228 /* XXX: Require handoff->trtype == ctrlr->trtype? */
230 mtx_lock(&ctrlr->lock);
231 if (ctrlr->shutdown) {
232 mtx_unlock(&ctrlr->lock);
234 nvmft_printf(ctrlr,
242 if (ctrlr->num_io_queues == 0) {
243 mtx_unlock(&ctrlr->lock);
245 nvmft_printf(ctrlr,
253 if (cmd->qid > ctrlr->num_io_queues) {
254 mtx_unlock(&ctrlr->lock);
256 nvmft_printf(ctrlr,
264 if (ctrlr->io_qpairs[qid - 1].qp != NULL) {
265 mtx_unlock(&ctrlr->lock);
267 nvmft_printf(ctrlr,
276 ctrlr->io_qpairs[qid - 1].qp = qp;
277 mtx_unlock(&ctrlr->lock);
278 nvmft_finish_accept(qp, cmd, ctrlr);
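Before the controller lock is taken, lines 207-219 verify that the connecting host's hostid and hostnqn match the ones recorded for the association. Lines 230-277 are then a validate-then-install ladder performed entirely under the lock; compressed into one sketch (the real code answers each failure with a transport-level Connect error rather than an errno):

    #include <sys/errno.h>

    static int
    install_io_qpair_sketch(struct nvmft_controller *ctrlr, uint16_t qid,
        struct nvmft_qpair *qp)
    {
        /* The caller guarantees qid != 0 (QID 0 is the admin queue). */
        mtx_lock(&ctrlr->lock);
        if (ctrlr->shutdown ||                  /* association going away */
            ctrlr->num_io_queues == 0 ||        /* queues never negotiated */
            qid > ctrlr->num_io_queues ||       /* QID out of range */
            ctrlr->io_qpairs[qid - 1].qp != NULL) { /* slot already taken */
            mtx_unlock(&ctrlr->lock);
            return (EBUSY);
        }
        ctrlr->io_qpairs[qid - 1].qp = qp;
        mtx_unlock(&ctrlr->lock);
        return (0);
    }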
287 struct nvmft_controller *ctrlr = arg;
295 mtx_lock(&ctrlr->lock);
296 for (u_int i = 0; i < ctrlr->num_io_queues; i++) {
297 if (ctrlr->io_qpairs[i].qp != NULL) {
298 ctrlr->io_qpairs[i].shutdown = true;
299 mtx_unlock(&ctrlr->lock);
300 nvmft_qpair_shutdown(ctrlr->io_qpairs[i].qp);
301 mtx_lock(&ctrlr->lock);
304 mtx_unlock(&ctrlr->lock);
307 nvmft_terminate_commands(ctrlr);
310 mtx_lock(&ctrlr->lock);
311 while (ctrlr->pending_commands != 0)
312 mtx_sleep(&ctrlr->pending_commands, &ctrlr->lock, 0, "nvmftsh",
314 mtx_unlock(&ctrlr->lock);
317 for (u_int i = 0; i < ctrlr->num_io_queues; i++) {
318 if (ctrlr->io_qpairs[i].qp != NULL)
319 nvmft_qpair_destroy(ctrlr->io_qpairs[i].qp);
321 free(ctrlr->io_qpairs, M_NVMFT);
322 ctrlr->io_qpairs = NULL;
324 mtx_lock(&ctrlr->lock);
325 ctrlr->num_io_queues = 0;
328 if (NVMEV(NVME_CSTS_REG_SHST, ctrlr->csts) == NVME_SHST_OCCURRING) {
329 ctrlr->csts &= ~NVMEM(NVME_CSTS_REG_SHST);
330 ctrlr->csts |= NVMEF(NVME_CSTS_REG_SHST, NVME_SHST_COMPLETE);
333 if (NVMEV(NVME_CSTS_REG_CFS, ctrlr->csts) == 0) {
334 ctrlr->csts &= ~NVMEM(NVME_CSTS_REG_RDY);
335 ctrlr->shutdown = false;
337 mtx_unlock(&ctrlr->lock);
345 if (ctrlr->admin_closed || NVMEV(NVME_CSTS_REG_CFS, ctrlr->csts) != 0)
346 nvmft_controller_terminate(ctrlr, 0);
349 &ctrlr->terminate_task, hz * 60 * 2);
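Two patterns are worth noting in the shutdown task. First, the loop at lines 296-301 drops the controller mutex around each nvmft_qpair_shutdown() call and retakes it afterwards, presumably because qpair shutdown can sleep and completes commands that need the same lock. Second, lines 310-314 are the classic counter-drain idiom: the waiter sleeps on the counter's address until the completion side drops it to zero and calls wakeup(). Both halves in a self-contained sketch:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>
    #include <sys/systm.h>

    struct drain {
        struct mtx lock;
        u_int pending;          /* in-flight commands */
    };

    /* Waiter: block until all in-flight commands have completed. */
    static void
    drain_wait(struct drain *d)
    {
        mtx_lock(&d->lock);
        while (d->pending != 0)
            mtx_sleep(&d->pending, &d->lock, 0, "drain", 0);
        mtx_unlock(&d->lock);
    }

    /* Completion side: the last command wakes the waiter. */
    static void
    drain_put(struct drain *d)
    {
        mtx_lock(&d->lock);
        if (--d->pending == 0)
            wakeup(&d->pending);
        mtx_unlock(&d->lock);
    }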
355 struct nvmft_controller *ctrlr = arg;
360 mtx_lock(&ctrlr->lock);
361 if (NVMEV(NVME_CC_REG_EN, ctrlr->cc) != 0) {
362 mtx_unlock(&ctrlr->lock);
364 if (ctrlr->ka_sbt != 0)
365 callout_schedule_sbt(&ctrlr->ka_timer, ctrlr->ka_sbt, 0,
371 ctrlr->shutdown = true;
372 mtx_unlock(&ctrlr->lock);
374 nvmft_qpair_destroy(ctrlr->admin);
377 np = ctrlr->np;
379 TAILQ_REMOVE(&np->controllers, ctrlr, link);
380 free_unr(np->ids, ctrlr->cntlid);
386 callout_drain(&ctrlr->ka_timer);
388 nvmft_printf(ctrlr, "association terminated\n");
389 nvmft_controller_free(ctrlr);
394 nvmft_controller_error(struct nvmft_controller *ctrlr, struct nvmft_qpair *qp,
408 if (qp != ctrlr->admin)
411 mtx_lock(&ctrlr->lock);
412 if (ctrlr->shutdown) {
413 ctrlr->admin_closed = true;
414 mtx_unlock(&ctrlr->lock);
418 if (NVMEV(NVME_CC_REG_EN, ctrlr->cc) == 0) {
419 MPASS(ctrlr->num_io_queues == 0);
420 mtx_unlock(&ctrlr->lock);
423 * Ok to drop lock here since ctrlr->cc can't
438 &ctrlr->terminate_task, NULL) == 0)
440 &ctrlr->terminate_task, 0);
449 ctrlr->admin_closed = true;
451 mtx_lock(&ctrlr->lock);
454 if (ctrlr->shutdown) {
455 mtx_unlock(&ctrlr->lock);
459 ctrlr->csts |= NVMEF(NVME_CSTS_REG_CFS, 1);
460 ctrlr->cc &= ~NVMEM(NVME_CC_REG_EN);
461 ctrlr->shutdown = true;
462 mtx_unlock(&ctrlr->lock);
464 callout_stop(&ctrlr->ka_timer);
465 taskqueue_enqueue(taskqueue_thread, &ctrlr->shutdown_task);
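One detail in the error path: if the admin queue fails before the host ever set CC.EN (lines 418-420), there is nothing to shut down gracefully, so lines 437-440 collapse the two-minute terminate backstop (queued at line 349) into an immediate run. taskqueue_cancel_timeout() returns 0 when the task was removed from the queue (or was never queued) and non-zero when it is already executing, in which case nothing more is needed. Reassembled with comments:

    /* If the delayed terminate_task is still pending, pull it off the
     * queue and requeue it with a zero delay so it runs now instead
     * of after the two-minute backstop. */
    if (taskqueue_cancel_timeout(taskqueue_thread, &ctrlr->terminate_task,
        NULL) == 0)
        taskqueue_enqueue_timeout(taskqueue_thread,
            &ctrlr->terminate_task, 0);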
519 handle_get_log_page(struct nvmft_controller *ctrlr,
564 mtx_lock(&ctrlr->lock);
565 hip = ctrlr->hip;
567 sbintime_getsec(ctrlr->busy_total) / 60;
569 sbintime_getsec(sbinuptime() - ctrlr->create_time) / 3600;
570 mtx_unlock(&ctrlr->lock);
581 if (offset >= sizeof(ctrlr->np->fp)) {
585 todo = sizeof(ctrlr->np->fp) - offset;
590 m_copyback(m, 0, todo, (char *)&ctrlr->np->fp + offset);
597 if (offset >= sizeof(*ctrlr->changed_ns)) {
601 todo = sizeof(*ctrlr->changed_ns) - offset;
606 mtx_lock(&ctrlr->lock);
607 m_copyback(m, 0, todo, (char *)ctrlr->changed_ns + offset);
608 if (offset == 0 && len == sizeof(*ctrlr->changed_ns))
609 memset(ctrlr->changed_ns, 0,
610 sizeof(*ctrlr->changed_ns));
612 ctrlr->changed_ns_reported = false;
613 mtx_unlock(&ctrlr->lock);
620 nvmft_printf(ctrlr, "Unsupported page %#x for GET_LOG_PAGE\n",
628 nvmft_command_completed(ctrlr->admin, nc);
630 nvmft_send_generic_error(ctrlr->admin, nc, status);
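Each supported log page is staged into an mbuf chain with m_copyback() and sent back over the transport; lines 581-590 show the offset/length clamping for the firmware-slot page. That staging step as a sketch (m_getml() is the chain allocator this file also uses at line 668):

    #include <sys/param.h>
    #include <sys/mbuf.h>

    static struct mbuf *
    stage_log_page(const void *page, size_t page_len, uint64_t offset,
        uint64_t len)
    {
        struct mbuf *m;
        u_int todo;

        /* The caller has already rejected offset >= page_len. */
        todo = page_len - offset;
        if (todo > len)
            todo = len;
        m = m_getml(todo, M_WAITOK);
        m_copyback(m, 0, todo, (const char *)page + offset);
        return (m);
    }

The Changed Namespace List page gets extra treatment: a full read from offset 0 zeroes the list and clears changed_ns_reported (lines 606-613), giving the page its read-to-clear semantics and re-arming the AER latch used by nvmft_controller_lun_changed().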
641 handle_identify_command(struct nvmft_controller *ctrlr,
651 if (data_len != sizeof(ctrlr->cdata)) {
652 nvmft_printf(ctrlr,
655 nvmft_send_generic_error(ctrlr->admin, nc,
664 nvmft_dispatch_command(ctrlr->admin, nc, true);
668 m = m_getml(sizeof(ctrlr->cdata), M_WAITOK);
669 m_copyback(m, 0, sizeof(ctrlr->cdata), (void *)&ctrlr->cdata);
671 sizeof(ctrlr->cdata));
687 nvmft_populate_active_nslist(ctrlr->np, nsid, nslist);
697 nvmft_printf(ctrlr, "Unsupported CNS %#x for IDENTIFY\n", cns);
703 nvmft_command_completed(ctrlr->admin, nc);
705 nvmft_send_generic_error(ctrlr->admin, nc, status);
710 handle_set_features(struct nvmft_controller *ctrlr,
739 mtx_lock(&ctrlr->lock);
740 if (ctrlr->num_io_queues != 0) {
741 mtx_unlock(&ctrlr->lock);
743 nvmft_send_generic_error(ctrlr->admin, nc,
749 ctrlr->num_io_queues = num_queues;
750 ctrlr->io_qpairs = io_qpairs;
751 mtx_unlock(&ctrlr->lock);
755 nvmft_send_response(ctrlr->admin, &cqe);
769 mtx_lock(&ctrlr->lock);
770 ctrlr->aer_mask = aer_mask;
771 mtx_unlock(&ctrlr->lock);
772 nvmft_send_success(ctrlr->admin, nc);
776 nvmft_printf(ctrlr,
782 nvmft_send_generic_error(ctrlr->admin, nc, NVME_SC_INVALID_FIELD);
787 update_cc(struct nvmft_controller *ctrlr, uint32_t new_cc, bool *need_shutdown)
789 struct nvmft_port *np = ctrlr->np;
794 mtx_lock(&ctrlr->lock);
797 if (ctrlr->shutdown) {
798 mtx_unlock(&ctrlr->lock);
802 if (!_nvmf_validate_cc(np->max_io_qsize, np->cap, ctrlr->cc, new_cc)) {
803 mtx_unlock(&ctrlr->lock);
807 changes = ctrlr->cc ^ new_cc;
808 ctrlr->cc = new_cc;
813 ctrlr->csts &= ~NVMEM(NVME_CSTS_REG_SHST);
814 ctrlr->csts |= NVMEF(NVME_CSTS_REG_SHST, NVME_SHST_OCCURRING);
815 ctrlr->cc &= ~NVMEM(NVME_CC_REG_EN);
816 ctrlr->shutdown = true;
818 nvmft_printf(ctrlr, "shutdown requested\n");
824 nvmft_printf(ctrlr, "reset requested\n");
825 ctrlr->shutdown = true;
828 ctrlr->csts |= NVMEF(NVME_CSTS_REG_RDY, 1);
830 mtx_unlock(&ctrlr->lock);
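update_cc() validates the proposed Controller Configuration value against CAP (line 802) and then acts only on the bits that changed (changes = old ^ new, lines 807-808). The CC/CSTS manipulation throughout this file uses the NVMEV/NVMEF/NVMEM helpers from <dev/nvme/nvme.h>, built from per-field _SHIFT/_MASK constants: NVMEV(F, r) extracts field F from register value r, NVMEF(F, v) constructs the register bits for F = v, and NVMEM(F) is the field's in-place mask. The shutdown-status progression as an isolated example:

    #include <dev/nvme/nvme.h>

    static void
    shst_progression_sketch(void)
    {
        uint32_t csts = 0;

        /* Host wrote CC.SHN: report "shutdown occurring" (813-814). */
        csts &= ~NVMEM(NVME_CSTS_REG_SHST);
        csts |= NVMEF(NVME_CSTS_REG_SHST, NVME_SHST_OCCURRING);

        /* Later, once the queues are drained (lines 328-330). */
        if (NVMEV(NVME_CSTS_REG_SHST, csts) == NVME_SHST_OCCURRING) {
            csts &= ~NVMEM(NVME_CSTS_REG_SHST);
            csts |= NVMEF(NVME_CSTS_REG_SHST, NVME_SHST_COMPLETE);
        }
    }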
836 handle_property_get(struct nvmft_controller *ctrlr, struct nvmf_capsule *nc,
847 rsp.value.u64 = htole64(ctrlr->np->cap);
852 rsp.value.u32.low = ctrlr->cdata.ver;
857 rsp.value.u32.low = htole32(ctrlr->cc);
862 rsp.value.u32.low = htole32(ctrlr->csts);
868 nvmft_send_response(ctrlr->admin, &rsp);
871 nvmft_send_generic_error(ctrlr->admin, nc, NVME_SC_INVALID_FIELD);
875 handle_property_set(struct nvmft_controller *ctrlr, struct nvmf_capsule *nc,
885 if (!update_cc(ctrlr, le32toh(pset->value.u32.low),
893 nvmft_send_success(ctrlr->admin, nc);
895 callout_stop(&ctrlr->ka_timer);
896 taskqueue_enqueue(taskqueue_thread, &ctrlr->shutdown_task);
900 nvmft_send_generic_error(ctrlr->admin, nc, NVME_SC_INVALID_FIELD);
904 handle_admin_fabrics_command(struct nvmft_controller *ctrlr,
909 handle_property_get(ctrlr, nc,
913 handle_property_set(ctrlr, nc,
917 nvmft_printf(ctrlr,
919 nvmft_send_generic_error(ctrlr->admin, nc,
923 nvmft_printf(ctrlr, "DISCONNECT command on admin queue\n");
924 nvmft_send_error(ctrlr->admin, nc, NVME_SCT_COMMAND_SPECIFIC,
928 nvmft_printf(ctrlr, "Unsupported fabrics command %#x\n",
930 nvmft_send_generic_error(ctrlr->admin, nc,
938 nvmft_handle_admin_command(struct nvmft_controller *ctrlr,
944 if (NVMEV(NVME_CC_REG_EN, ctrlr->cc) == 0 &&
946 nvmft_printf(ctrlr,
948 nvmft_send_generic_error(ctrlr->admin, nc,
954 atomic_store_int(&ctrlr->ka_active_traffic, 1);
958 handle_get_log_page(ctrlr, nc, cmd);
961 handle_identify_command(ctrlr, nc, cmd);
964 handle_set_features(ctrlr, nc, cmd);
967 mtx_lock(&ctrlr->lock);
968 if (ctrlr->aer_pending == NVMFT_NUM_AER) {
969 mtx_unlock(&ctrlr->lock);
970 nvmft_send_error(ctrlr->admin, nc,
975 ctrlr->aer_cids[ctrlr->aer_pidx] = cmd->cid;
976 ctrlr->aer_pending++;
977 ctrlr->aer_pidx = (ctrlr->aer_pidx + 1) % NVMFT_NUM_AER;
978 mtx_unlock(&ctrlr->lock);
983 nvmft_send_success(ctrlr->admin, nc);
987 handle_admin_fabrics_command(ctrlr, nc,
991 nvmft_printf(ctrlr, "Unsupported admin opcode %#x\n", cmd->opc);
992 nvmft_send_generic_error(ctrlr->admin, nc,
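Two things stand out in the admin dispatcher. Until CC.EN is set, only Fabrics commands are accepted (lines 944-948). And ASYNC_EVENT_REQUEST never completes inline: lines 967-978 park the command ID in a small ring bounded by NVMFT_NUM_AER, rejecting overflow (presumably with the AER-limit-exceeded status; the exact completion is truncated in the listing). The ring in isolation (size and names assumed):

    #define NUM_AER 4       /* assumed; the driver's limit is NVMFT_NUM_AER */

    struct aer_ring {
        uint16_t cids[NUM_AER];     /* command IDs awaiting an event */
        u_int    pidx, cidx, pending;
    };

    /* Producer (admin dispatch): remember the AER's command ID. */
    static bool
    aer_push(struct aer_ring *r, uint16_t cid)
    {
        if (r->pending == NUM_AER)
            return (false);         /* limit reached; caller sends error */
        r->cids[r->pidx] = cid;
        r->pending++;
        r->pidx = (r->pidx + 1) % NUM_AER;
        return (true);
    }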
1003 struct nvmft_controller *ctrlr = nvmft_qpair_ctrlr(qp);
1006 atomic_store_int(&ctrlr->ka_active_traffic, 1);
1027 nvmft_printf(ctrlr, "Unsupported I/O opcode %#x\n", cmd->opc);
1036 nvmft_report_aer(struct nvmft_controller *ctrlr, uint32_t aer_mask,
1044 mtx_lock(&ctrlr->lock);
1045 if ((ctrlr->aer_mask & aer_mask) == 0) {
1046 mtx_unlock(&ctrlr->lock);
1054 if (ctrlr->aer_pending == 0) {
1055 mtx_unlock(&ctrlr->lock);
1056 nvmft_printf(ctrlr,
1063 cpl.cid = ctrlr->aer_cids[ctrlr->aer_cidx];
1064 ctrlr->aer_pending--;
1065 ctrlr->aer_cidx = (ctrlr->aer_cidx + 1) % NVMFT_NUM_AER;
1066 mtx_unlock(&ctrlr->lock);
1072 nvmft_send_response(ctrlr->admin, &cpl);
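nvmft_report_aer() is the consumer side of that ring: events masked off by aer_mask are dropped (lines 1044-1046), an event arriving with no AER queued is apparently only logged (lines 1054-1056), and otherwise the oldest queued command ID is completed with the event's fields (lines 1063-1066). Continuing the sketch above:

    /* Consumer (event report): complete the oldest queued AER, if any. */
    static bool
    aer_pop(struct aer_ring *r, uint16_t *cidp)
    {
        if (r->pending == 0)
            return (false);         /* no AER outstanding; event dropped */
        *cidp = r->cids[r->cidx];
        r->pending--;
        r->cidx = (r->cidx + 1) % NUM_AER;
        return (true);
    }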
1076 nvmft_controller_lun_changed(struct nvmft_controller *ctrlr, int lun_id)
1084 mtx_lock(&ctrlr->lock);
1085 nslist = ctrlr->changed_ns;
1094 mtx_unlock(&ctrlr->lock);
1104 memset(ctrlr->changed_ns, 0,
1105 sizeof(*ctrlr->changed_ns));
1106 ctrlr->changed_ns->ns[0] = 0xffffffff;
1121 if (ctrlr->changed_ns_reported) {
1122 mtx_unlock(&ctrlr->lock);
1125 ctrlr->changed_ns_reported = true;
1126 mtx_unlock(&ctrlr->lock);
1128 nvmft_report_aer(ctrlr, NVME_ASYNC_EVENT_NS_ATTRIBUTE, 0x2, 0x0,
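nvmft_controller_lun_changed() maintains the Changed Namespace List under the controller lock. On overflow, lines 1104-1106 collapse the list to the single sentinel entry 0xffffffff, which the NVMe spec defines to mean more than 1024 namespaces changed. Lines 1121-1126 then form a one-shot latch: only the first change since the last log read raises an AER, and the latch is re-armed when the host reads the log page (line 612). The latch as a sketch:

    /* One-shot AER latch: raise at most one Namespace Attribute
     * Changed event until the host reads the changed-NS log page. */
    static void
    lun_changed_latch_sketch(struct nvmft_controller *ctrlr)
    {
        mtx_lock(&ctrlr->lock);
        /* ... record the NSID in ctrlr->changed_ns (1085-1106) ... */
        if (ctrlr->changed_ns_reported) {
            mtx_unlock(&ctrlr->lock);
            return;     /* an unread log already covers this change */
        }
        ctrlr->changed_ns_reported = true;
        mtx_unlock(&ctrlr->lock);
        /* nvmft_report_aer() then posts the event (line 1128). */
    }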