/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2023-2024 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 */

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/refcount.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <dev/nvmf/host/nvmf_var.h>

/*
 * The I/O completion may trigger after the received CQE if the I/O
 * used a zero-copy mbuf that isn't harvested until after the NIC
 * driver processes TX completions.  Use spriv_field0 as a refcount.
 *
 * Store any I/O error returned in spriv_field1.
 */
static __inline u_int *
ccb_refs(union ccb *ccb)
{
	return ((u_int *)&ccb->ccb_h.spriv_field0);
}

#define	spriv_ioerror	spriv_field1

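/*
 * Common completion handler invoked as each reference on a CCB is
 * dropped.  Only the final release examines the results and
 * completes the CCB.
 */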
static void
nvmf_ccb_done(union ccb *ccb)
{
	if (!refcount_release(ccb_refs(ccb)))
		return;

	if (nvmf_cqe_aborted(&ccb->nvmeio.cpl)) {
		struct cam_sim *sim = xpt_path_sim(ccb->ccb_h.path);
		struct nvmf_softc *sc = cam_sim_softc(sim);

		if (nvmf_fail_disconnect || sc->sim_shutdown)
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		else
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
	} else if (ccb->nvmeio.cpl.status != 0) {
		ccb->ccb_h.status = CAM_NVME_STATUS_ERROR;
		xpt_done(ccb);
	} else if (ccb->ccb_h.spriv_ioerror != 0) {
		KASSERT(ccb->ccb_h.spriv_ioerror != EJUSTRETURN,
		    ("%s: zero sized transfer without CQE error", __func__));
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
	} else {
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done_direct(ccb);
	}
}

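/*
 * Data transfer callback registered via nvmf_capsule_append_data().
 * Records any transfer error in the CCB and drops one reference.
 */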
static void
nvmf_ccb_io_complete(void *arg, size_t xfered, int error)
{
	union ccb *ccb = arg;

	/*
	 * TODO: Reporting partial completions requires extending
	 * nvmeio to support resid and updating nda to handle partial
	 * reads, either by returning partial success (or an error) to
	 * the caller, or retrying all or part of the request.
	 */
	ccb->ccb_h.spriv_ioerror = error;
	if (error == 0) {
		if (xfered == 0) {
#ifdef INVARIANTS
			/*
			 * If the request fails with an error in the CQE
			 * there will be no data transferred but also no
			 * I/O error.
			 */
			ccb->ccb_h.spriv_ioerror = EJUSTRETURN;
#endif
		} else
			KASSERT(xfered == ccb->nvmeio.dxfer_len,
			    ("%s: partial CCB completion", __func__));
	}

	nvmf_ccb_done(ccb);
}

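/*
 * Invoked when the completion queue entry for a request arrives.
 * Saves the CQE in the CCB and drops one reference.
 */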
static void
nvmf_ccb_complete(void *arg, const struct nvme_completion *cqe)
{
	union ccb *ccb = arg;

	ccb->nvmeio.cpl = *cqe;
	nvmf_ccb_done(ccb);
}

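/*
 * Translate an XPT_NVME_IO or XPT_NVME_ADMIN CCB into an NVMeoF
 * request capsule and submit it on a suitable queue pair.
 */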
static void
nvmf_sim_io(struct nvmf_softc *sc, union ccb *ccb)
{
	struct ccb_nvmeio *nvmeio = &ccb->nvmeio;
	struct memdesc mem;
	struct nvmf_request *req;
	struct nvmf_host_qpair *qp;

	mtx_lock(&sc->sim_mtx);
	if (sc->sim_disconnected) {
		mtx_unlock(&sc->sim_mtx);
		if (nvmf_fail_disconnect || sc->sim_shutdown)
			nvmeio->ccb_h.status = CAM_DEV_NOT_THERE;
		else
			nvmeio->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}
	if (nvmeio->ccb_h.func_code == XPT_NVME_IO)
		qp = nvmf_select_io_queue(sc);
	else
		qp = sc->admin;
	req = nvmf_allocate_request(qp, &nvmeio->cmd, nvmf_ccb_complete,
	    ccb, M_NOWAIT);
	if (req == NULL) {
		mtx_unlock(&sc->sim_mtx);
		nvmeio->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

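	/*
	 * A CCB with a data transfer holds two references: one
	 * released by nvmf_ccb_complete() when the CQE arrives and
	 * one released by nvmf_ccb_io_complete() when the data
	 * transfer finishes.
	 */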
	if (nvmeio->dxfer_len != 0) {
		refcount_init(ccb_refs(ccb), 2);
		mem = memdesc_ccb(ccb);
		nvmf_capsule_append_data(req->nc, &mem, nvmeio->dxfer_len,
		    (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT,
		    nvmf_ccb_io_complete, ccb);
	} else
		refcount_init(ccb_refs(ccb), 1);

	/*
	 * Clear spriv_ioerror as it can hold an earlier error if this
	 * CCB was aborted and has been retried.
	 */
	ccb->ccb_h.spriv_ioerror = 0;
	KASSERT(ccb->ccb_h.status == CAM_REQ_INPROG,
	    ("%s: incoming CCB is not in-progress", __func__));
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	nvmf_submit_request(req);
	mtx_unlock(&sc->sim_mtx);
}

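/*
 * CAM SIM action routine.  I/O and admin commands complete
 * asynchronously via the callbacks above; all other CCBs are
 * completed before returning.
 */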
static void
nvmf_sim_action(struct cam_sim *sim, union ccb *ccb)
{
	struct nvmf_softc *sc = cam_sim_softc(sim);

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("nvmf_sim_action: func= %#x\n",
		ccb->ccb_h.func_code));

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:	/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = 0;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = sc->cdata->nn;
		cpi->async_flags = 0;
		cpi->hpath_id = 0;
		cpi->initiator_id = 0;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "NVMeoF", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = 0;

		/* XXX: Same as iSCSI. */
		cpi->base_transfer_speed = 150000;
		cpi->protocol = PROTO_NVME;
		cpi->protocol_version = sc->vs;
		cpi->transport = XPORT_NVMF;
		cpi->transport_version = sc->vs;
		cpi->xport_specific.nvmf.nsid =
		    xpt_path_lun_id(ccb->ccb_h.path);
		cpi->xport_specific.nvmf.trtype = sc->trtype;
		strlcpy(cpi->xport_specific.nvmf.dev_name,
		    device_get_nameunit(sc->dev),
		    sizeof(cpi->xport_specific.nvmf.dev_name));
		cpi->maxio = sc->max_xfer_size;
		cpi->hba_vendor = 0;
		cpi->hba_device = 0;
		cpi->hba_subvendor = 0;
		cpi->hba_subdevice = 0;
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:	/* Get transport settings */
	{
		struct ccb_trans_settings *cts = &ccb->cts;
		struct ccb_trans_settings_nvme *nvme;
		struct ccb_trans_settings_nvmf *nvmf;

		cts->protocol = PROTO_NVME;
		cts->protocol_version = sc->vs;
		cts->transport = XPORT_NVMF;
		cts->transport_version = sc->vs;

		nvme = &cts->proto_specific.nvme;
		nvme->valid = CTS_NVME_VALID_SPEC;
		nvme->spec = sc->vs;

		nvmf = &cts->xport_specific.nvmf;
		nvmf->valid = CTS_NVMF_VALID_TRTYPE;
		nvmf->trtype = sc->trtype;
		cts->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_SET_TRAN_SETTINGS:	/* Set transport settings */
		/*
		 * No transfer settings can be set, but nvme_xpt sends
		 * this anyway.
		 */
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_NVME_IO:		/* Execute the requested I/O */
	case XPT_NVME_ADMIN:		/* or Admin operation */
		nvmf_sim_io(sc, ccb);
		return;
	default:
		/* XXX */
		device_printf(sc->dev, "unhandled sim function %#x\n",
		    ccb->ccb_h.func_code);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}

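/*
 * Allocate a CAM SIM and devq for this association, register a CAM
 * bus, and create a wildcard path used to post async events.
 */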
int
nvmf_init_sim(struct nvmf_softc *sc)
{
	struct cam_devq *devq;
	int max_trans;

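	/*
	 * Size the CAM simq at 3/4 of the maximum number of pending
	 * I/O requests.  (Presumably the remaining slots are held in
	 * reserve for requests submitted outside of CAM.)
	 */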
	max_trans = sc->max_pending_io * 3 / 4;
	devq = cam_simq_alloc(max_trans);
	if (devq == NULL) {
		device_printf(sc->dev, "Failed to allocate CAM simq\n");
		return (ENOMEM);
	}

	mtx_init(&sc->sim_mtx, "nvmf sim", NULL, MTX_DEF);
	sc->sim = cam_sim_alloc(nvmf_sim_action, NULL, "nvme", sc,
	    device_get_unit(sc->dev), NULL, max_trans, max_trans, devq);
	if (sc->sim == NULL) {
		device_printf(sc->dev, "Failed to allocate CAM sim\n");
		cam_simq_free(devq);
		mtx_destroy(&sc->sim_mtx);
		return (ENXIO);
	}
	if (xpt_bus_register(sc->sim, sc->dev, 0) != CAM_SUCCESS) {
		device_printf(sc->dev, "Failed to create CAM bus\n");
		cam_sim_free(sc->sim, TRUE);
		mtx_destroy(&sc->sim_mtx);
		return (ENXIO);
	}
	if (xpt_create_path(&sc->path, NULL, cam_sim_path(sc->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		device_printf(sc->dev, "Failed to create CAM path\n");
		xpt_bus_deregister(cam_sim_path(sc->sim));
		cam_sim_free(sc->sim, TRUE);
		mtx_destroy(&sc->sim_mtx);
		return (ENXIO);
	}
	return (0);
}

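/*
 * Request a CAM rescan of the LUN mapped to the given namespace ID.
 */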
void
nvmf_sim_rescan_ns(struct nvmf_softc *sc, uint32_t id)
{
	union ccb *ccb;

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		device_printf(sc->dev,
		    "unable to alloc CCB for rescan of namespace %u\n", id);
		return;
	}

	/*
	 * As with nvme_sim, map NVMe namespace IDs onto CAM unit
	 * LUNs.
	 */
	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sc->sim), 0,
	    id) != CAM_REQ_CMP) {
		device_printf(sc->dev,
		    "Unable to create path for rescan of namespace %u\n", id);
		xpt_free_ccb(ccb);
		return;
	}
	xpt_rescan(ccb);
}

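/*
 * Freeze the SIM queue while the association is disconnected; CCBs
 * already in flight are requeued (or failed) via the
 * sim_disconnected checks above.
 */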
void
nvmf_disconnect_sim(struct nvmf_softc *sc)
{
	mtx_lock(&sc->sim_mtx);
	sc->sim_disconnected = true;
	xpt_freeze_simq(sc->sim, 1);
	mtx_unlock(&sc->sim_mtx);
}

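/*
 * Resume CCB processing once the association has reconnected.
 */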
void
nvmf_reconnect_sim(struct nvmf_softc *sc)
{
	mtx_lock(&sc->sim_mtx);
	sc->sim_disconnected = false;
	mtx_unlock(&sc->sim_mtx);
	xpt_release_simq(sc->sim, 1);
}

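/*
 * Mark the SIM as shut down so CCBs fail with CAM_DEV_NOT_THERE
 * rather than being requeued, then release the SIM queue so held
 * CCBs can complete.
 */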
void
nvmf_shutdown_sim(struct nvmf_softc *sc)
{
	mtx_lock(&sc->sim_mtx);
	sc->sim_shutdown = true;
	mtx_unlock(&sc->sim_mtx);
	xpt_release_simq(sc->sim, 1);
}

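/*
 * Tear down the CAM state created by nvmf_init_sim() in reverse
 * order, first announcing the loss of the devices behind the
 * wildcard path.
 */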
void
nvmf_destroy_sim(struct nvmf_softc *sc)
{
	xpt_async(AC_LOST_DEVICE, sc->path, NULL);
	if (sc->sim_disconnected)
		xpt_release_simq(sc->sim, 1);
	xpt_free_path(sc->path);
	xpt_bus_deregister(cam_sim_path(sc->sim));
	cam_sim_free(sc->sim, TRUE);
	mtx_destroy(&sc->sim_mtx);
}