/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2023-2024 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 */

#ifndef __NVMF_VAR_H__
#define __NVMF_VAR_H__

#include <sys/_callout.h>
#include <sys/_eventhandler.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/_sx.h>
#include <sys/_task.h>
#include <sys/queue.h>
#include <dev/nvme/nvme.h>
#include <dev/nvmf/nvmf_transport.h>

/* Opaque types; definitions live in the corresponding .c files. */
struct nvmf_aer;
struct nvmf_capsule;
struct nvmf_host_qpair;
struct nvmf_namespace;

/*
 * Completion callback invoked when a reply arrives for a submitted
 * request; receives the caller's cb_arg and the completion queue entry.
 */
typedef void nvmf_request_complete_t(void *, const struct nvme_completion *);

/*
 * Connection state handed off to the driver at attach time;
 * initialized by nvmf_init_ivars() and released by nvmf_free_ivars()
 * (declared below under nvmf.c).
 */
struct nvmf_ivars {
	struct nvmf_handoff_host *hh;
	struct nvmf_handoff_qpair_params *io_params;
	struct nvme_controller_data *cdata;
};

/* Per-device software context for an NVMe-over-Fabrics host. */
struct nvmf_softc {
	device_t dev;

	/* Admin queue pair plus an array of num_io_queues I/O queue pairs. */
	struct nvmf_host_qpair *admin;
	struct nvmf_host_qpair **io;
	u_int num_io_queues;
	enum nvmf_trtype trtype;

	/* CAM SIM state; sim_mtx guards the disconnect/shutdown flags. */
	struct cam_sim *sim;
	struct cam_path *path;
	struct mtx sim_mtx;
	bool sim_disconnected;
	bool sim_shutdown;

	struct nvmf_namespace **ns;

	/* Cached controller identify data and properties (CAP, VS). */
	struct nvme_controller_data *cdata;
	uint64_t cap;
	uint32_t vs;
	u_int max_pending_io;
	u_long max_xfer_size;

	struct cdev *cdev;

	/*
	 * Keep Alive support depends on two timers.  The 'tx' timer
	 * is responsible for sending KeepAlive commands and runs at
	 * half the timeout interval.  The 'rx' timer is responsible
	 * for detecting an actual timeout.
	 *
	 * For efficient support of TKAS, the host does not reschedule
	 * these timers every time new commands are scheduled.
	 * Instead, the host sets the *_traffic flags when commands
	 * are sent and received.  The timeout handlers check and
	 * clear these flags.  This does mean it can take up to twice
	 * the timeout time to detect an AWOL controller.
	 */
	bool ka_traffic;			/* Using TKAS? */

	volatile int ka_active_tx_traffic;
	struct callout ka_tx_timer;
	sbintime_t ka_tx_sbt;

	volatile int ka_active_rx_traffic;
	struct callout ka_rx_timer;
	sbintime_t ka_rx_sbt;

	/* Serializes connect/disconnect; disconnect_task runs nvmf_disconnect work. */
	struct sx connection_lock;
	struct task disconnect_task;
	bool detaching;

	/* Asynchronous Event Request tracking (see nvmf_aer.c). */
	u_int num_aer;
	struct nvmf_aer *aer;

	eventhandler_tag shutdown_pre_sync_eh;
	eventhandler_tag shutdown_post_sync_eh;
};

/*
 * An in-flight command on a queue pair.  'cb' is invoked with 'cb_arg'
 * when the completion for capsule 'nc' arrives.
 */
struct nvmf_request {
	struct nvmf_host_qpair *qp;
	struct nvmf_capsule *nc;
	nvmf_request_complete_t *cb;
	void *cb_arg;
	bool aer;		/* Is this an Asynchronous Event Request? */

	STAILQ_ENTRY(nvmf_request) link;
};

/*
 * Tracks completion of a synchronous request: the CQE reply and,
 * optionally, an associated data transfer (io_done/io_error).
 * Consumed by nvmf_wait_for_reply().
 */
struct nvmf_completion_status {
	struct nvme_completion cqe;
	bool done;
	bool io_done;
	int io_error;
};

/* Select the I/O queue pair to submit a request on. */
static __inline struct nvmf_host_qpair *
nvmf_select_io_queue(struct nvmf_softc *sc)
{
	/* TODO: Support multiple queues? */
	return (sc->io[0]);
}

/*
 * Return true if this completion reports that the command was aborted
 * by the host (path-related status type, command-aborted-by-host code).
 */
static __inline bool
nvmf_cqe_aborted(const struct nvme_completion *cqe)
{
	uint16_t status;

	status = le16toh(cqe->status);
	return (NVME_STATUS_GET_SCT(status) == NVME_SCT_PATH_RELATED &&
	    NVME_STATUS_GET_SC(status) == NVME_SC_COMMAND_ABORTED_BY_HOST);
}

/*
 * Prepare a completion status for a request with no data transfer:
 * io_done starts true so only the CQE reply is waited on.
 */
static __inline void
nvmf_status_init(struct nvmf_completion_status *status)
{
	status->done = false;
	status->io_done = true;
	status->io_error = 0;
}

/* Mark that this request also has a pending data transfer to wait for. */
static __inline void
nvmf_status_wait_io(struct nvmf_completion_status *status)
{
	status->io_done = false;
}

#ifdef DRIVER_MODULE
extern driver_t nvme_nvmf_driver;
#endif

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_NVMF);
#endif

/* If true, I/O requests will fail while the host is disconnected. */
extern bool nvmf_fail_disconnect;

/* nvmf.c */
void	nvmf_complete(void *arg, const struct nvme_completion *cqe);
void	nvmf_io_complete(void *arg, size_t xfered, int error);
void	nvmf_wait_for_reply(struct nvmf_completion_status *status);
int	nvmf_init_ivars(struct nvmf_ivars *ivars, struct nvmf_handoff_host *hh);
void	nvmf_free_ivars(struct nvmf_ivars *ivars);
void	nvmf_disconnect(struct nvmf_softc *sc);
void	nvmf_rescan_ns(struct nvmf_softc *sc, uint32_t nsid);
void	nvmf_rescan_all_ns(struct nvmf_softc *sc);
int	nvmf_passthrough_cmd(struct nvmf_softc *sc, struct nvme_pt_command *pt,
    bool admin);

/* nvmf_aer.c */
void	nvmf_init_aer(struct nvmf_softc *sc);
int	nvmf_start_aer(struct nvmf_softc *sc);
void	nvmf_destroy_aer(struct nvmf_softc *sc);

/*
 * nvmf_cmd.c
 *
 * Helpers that build and submit individual fabrics/admin commands.
 * 'how' is passed through to request allocation (presumably M_WAITOK /
 * M_NOWAIT semantics — NOTE(review): confirm against nvmf_cmd.c).
 */
bool	nvmf_cmd_get_property(struct nvmf_softc *sc, uint32_t offset,
    uint8_t size, nvmf_request_complete_t *cb, void *cb_arg, int how);
bool	nvmf_cmd_set_property(struct nvmf_softc *sc, uint32_t offset,
    uint8_t size, uint64_t value, nvmf_request_complete_t *cb, void *cb_arg,
    int how);
bool	nvmf_cmd_keep_alive(struct nvmf_softc *sc, nvmf_request_complete_t *cb,
    void *cb_arg, int how);
bool	nvmf_cmd_identify_active_namespaces(struct nvmf_softc *sc, uint32_t id,
    struct nvme_ns_list *nslist, nvmf_request_complete_t *req_cb,
    void *req_cb_arg, nvmf_io_complete_t *io_cb, void *io_cb_arg, int how);
bool	nvmf_cmd_identify_namespace(struct nvmf_softc *sc, uint32_t id,
    struct nvme_namespace_data *nsdata, nvmf_request_complete_t *req_cb,
    void *req_cb_arg, nvmf_io_complete_t *io_cb, void *io_cb_arg, int how);
bool	nvmf_cmd_get_log_page(struct nvmf_softc *sc, uint32_t nsid, uint8_t lid,
    uint64_t offset, void *buf, size_t len, nvmf_request_complete_t *req_cb,
    void *req_cb_arg, nvmf_io_complete_t *io_cb, void *io_cb_arg, int how);

/* nvmf_ctldev.c */
int	nvmf_ctl_load(void);
void	nvmf_ctl_unload(void);

/* nvmf_ns.c */
struct nvmf_namespace *nvmf_init_ns(struct nvmf_softc *sc, uint32_t id,
    const struct nvme_namespace_data *data);
void	nvmf_disconnect_ns(struct nvmf_namespace *ns);
void	nvmf_reconnect_ns(struct nvmf_namespace *ns);
void	nvmf_shutdown_ns(struct nvmf_namespace *ns);
void	nvmf_destroy_ns(struct nvmf_namespace *ns);
bool	nvmf_update_ns(struct nvmf_namespace *ns,
    const struct nvme_namespace_data *data);

/* nvmf_qpair.c */
struct nvmf_host_qpair *nvmf_init_qp(struct nvmf_softc *sc,
    enum nvmf_trtype trtype, struct nvmf_handoff_qpair_params *handoff,
    const char *name);
void	nvmf_shutdown_qp(struct nvmf_host_qpair *qp);
void	nvmf_destroy_qp(struct nvmf_host_qpair *qp);
struct nvmf_request *nvmf_allocate_request(struct nvmf_host_qpair *qp,
    void *sqe, nvmf_request_complete_t *cb, void *cb_arg, int how);
void	nvmf_submit_request(struct nvmf_request *req);
void	nvmf_free_request(struct nvmf_request *req);

/* nvmf_sim.c */
int	nvmf_init_sim(struct nvmf_softc *sc);
void	nvmf_disconnect_sim(struct nvmf_softc *sc);
void	nvmf_reconnect_sim(struct nvmf_softc *sc);
void	nvmf_shutdown_sim(struct nvmf_softc *sc);
void	nvmf_destroy_sim(struct nvmf_softc *sc);
void	nvmf_sim_rescan_ns(struct nvmf_softc *sc, uint32_t id);

#endif /* !__NVMF_VAR_H__ */