nvme_qpair.c revision 248756
/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/nvme/nvme_qpair.c 248756 2013-03-26 21:00:18Z jimharris $");

#include <sys/param.h>
#include <sys/bus.h>

#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static void	_nvme_qpair_submit_request(struct nvme_qpair *qpair,
					   struct nvme_request *req);

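/*
 * Determine whether a command that completed with an error status should
 *  be retried.  Only a small set of generic status codes is considered
 *  retryable; everything else is treated as a hard failure.
 */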
static boolean_t
nvme_completion_is_retry(const struct nvme_completion *cpl)
{
	/*
	 * TODO: spec is not clear how commands that are aborted due
	 *  to TLER will be marked.  So for now, it seems
	 *  NAMESPACE_NOT_READY is the only case where we should
	 *  look at the DNR bit.
	 */
	switch (cpl->status.sct) {
	case NVME_SCT_GENERIC:
		switch (cpl->status.sc) {
		case NVME_SC_ABORTED_BY_REQUEST:
			return (1);
		case NVME_SC_NAMESPACE_NOT_READY:
			if (cpl->status.dnr)
				return (0);
			else
				return (1);
		case NVME_SC_INVALID_OPCODE:
		case NVME_SC_INVALID_FIELD:
		case NVME_SC_COMMAND_ID_CONFLICT:
		case NVME_SC_DATA_TRANSFER_ERROR:
		case NVME_SC_ABORTED_POWER_LOSS:
		case NVME_SC_INTERNAL_DEVICE_ERROR:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
		case NVME_SC_COMMAND_SEQUENCE_ERROR:
		case NVME_SC_LBA_OUT_OF_RANGE:
		case NVME_SC_CAPACITY_EXCEEDED:
		default:
			return (0);
		}
	case NVME_SCT_COMMAND_SPECIFIC:
	case NVME_SCT_MEDIA_ERROR:
	case NVME_SCT_VENDOR_SPECIFIC:
	default:
		return (0);
	}
}

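/*
 * Create the DMA maps for a tracker's payload and PRP list, load the PRP
 *  list so its bus address is known, and initialize the tracker's timeout
 *  callout and command ID.
 */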
static void
nvme_qpair_construct_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
    uint16_t cid)
{

	bus_dmamap_create(qpair->dma_tag, 0, &tr->payload_dma_map);
	bus_dmamap_create(qpair->dma_tag, 0, &tr->prp_dma_map);

	bus_dmamap_load(qpair->dma_tag, tr->prp_dma_map, tr->prp,
	    sizeof(tr->prp), nvme_single_map, &tr->prp_bus_addr, 0);

	callout_init(&tr->timer, 1);
	tr->cid = cid;
	tr->qpair = qpair;
}

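/*
 * Process a completion for a tracker's request: invoke the request's
 *  callback (unless the command will be retried), then either resubmit
 *  the tracker or return it to the free list and dispatch the next
 *  queued request if one is waiting.
 */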
static void
nvme_qpair_complete_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
    struct nvme_completion *cpl, boolean_t print_on_error)
{
	struct nvme_request	*req;
	boolean_t		retry, error;

	req = tr->req;
	error = nvme_completion_is_error(cpl);
	retry = error && nvme_completion_is_retry(cpl);

	if (error && print_on_error) {
		nvme_dump_completion(cpl);
		nvme_dump_command(&req->cmd);
	}

	qpair->act_tr[cpl->cid] = NULL;

	KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n"));

	if (req->cb_fn && !retry)
		req->cb_fn(req->cb_arg, cpl);

	mtx_lock(&qpair->lock);
	callout_stop(&tr->timer);

	if (retry)
		nvme_qpair_submit_tracker(qpair, tr);
	else {
		if (req->payload_size > 0 || req->uio != NULL)
			bus_dmamap_unload(qpair->dma_tag,
			    tr->payload_dma_map);

		nvme_free_request(req);
		tr->req = NULL;

		TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq);
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);

		/*
		 * If the controller is in the middle of resetting, don't
		 *  try to submit queued requests here - let the reset logic
		 *  handle that instead.
		 */
		if (!STAILQ_EMPTY(&qpair->queued_req) &&
		    !qpair->ctrlr->is_resetting) {
			req = STAILQ_FIRST(&qpair->queued_req);
			STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
			_nvme_qpair_submit_request(qpair, req);
		}
	}

	mtx_unlock(&qpair->lock);
}

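/*
 * Complete a tracker without a hardware completion by constructing a
 *  synthetic completion entry with the given status code type and status
 *  code (e.g. for aborted or timed-out commands).
 */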
static void
nvme_qpair_manual_complete_tracker(struct nvme_qpair *qpair,
    struct nvme_tracker *tr, uint32_t sct, uint32_t sc,
    boolean_t print_on_error)
{
	struct nvme_completion	cpl;

	memset(&cpl, 0, sizeof(cpl));
	cpl.sqid = qpair->id;
	cpl.cid = tr->cid;
	cpl.status.sct = sct;
	cpl.status.sc = sc;
	nvme_qpair_complete_tracker(qpair, tr, &cpl, print_on_error);
}

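/*
 * Drain the completion queue: consume entries whose phase bit matches the
 *  expected phase, complete the associated trackers, and update the
 *  completion queue head doorbell as entries are consumed.
 */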
void
nvme_qpair_process_completions(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;
	struct nvme_completion	*cpl;

	qpair->num_intr_handler_calls++;
	if (!qpair->is_enabled)
		/*
		 * qpair is not enabled, likely because a controller reset is
		 *  in progress.  Ignore the interrupt - any I/O that was
		 *  associated with this interrupt will get retried when the
		 *  reset is complete.
		 */
		return;

	while (1) {
		cpl = &qpair->cpl[qpair->cq_head];

		if (cpl->status.p != qpair->phase)
			break;

		tr = qpair->act_tr[cpl->cid];

		if (tr != NULL) {
			nvme_qpair_complete_tracker(qpair, tr, cpl, TRUE);
			qpair->sq_head = cpl->sqhd;
		} else {
			printf("cpl does not map to outstanding cmd\n");
			nvme_dump_completion(cpl);
			KASSERT(0, ("received completion for unknown cmd\n"));
		}

		if (++qpair->cq_head == qpair->num_entries) {
			qpair->cq_head = 0;
			qpair->phase = !qpair->phase;
		}

		nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].cq_hdbl,
		    qpair->cq_head);
	}
}

static void
nvme_qpair_msix_handler(void *arg)
{
	struct nvme_qpair *qpair = arg;

	nvme_qpair_process_completions(qpair);
}

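/*
 * Initialize a queue pair: set up its MSI-X interrupt (when enabled),
 *  DMA tag, submission and completion ring memory, doorbell offsets, and
 *  the pool of command trackers.
 */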
void
nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
    uint16_t vector, uint32_t num_entries, uint32_t num_trackers,
    uint32_t max_xfer_size, struct nvme_controller *ctrlr)
{
	struct nvme_tracker	*tr;
	uint32_t		i;

	qpair->id = id;
	qpair->vector = vector;
	qpair->num_entries = num_entries;
#ifdef CHATHAM2
	/*
	 * Chatham prototype board starts having issues at higher queue
	 *  depths.  So use a conservative estimate here of no more than 64
	 *  outstanding I/O per queue at any one point.
	 */
	if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
		num_trackers = min(num_trackers, 64);
#endif
	qpair->num_trackers = num_trackers;
	qpair->max_xfer_size = max_xfer_size;
	qpair->ctrlr = ctrlr;

	if (ctrlr->msix_enabled) {

		/*
		 * MSI-X vector resource IDs start at 1, so we add one to
		 *  the queue's vector to get the corresponding rid to use.
		 */
		qpair->rid = vector + 1;

		qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
		    &qpair->rid, RF_ACTIVE);

		bus_setup_intr(ctrlr->dev, qpair->res,
		    INTR_TYPE_MISC | INTR_MPSAFE, NULL,
		    nvme_qpair_msix_handler, qpair, &qpair->tag);
	}

	mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);

	bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    sizeof(uint64_t), PAGE_SIZE, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, qpair->max_xfer_size,
	    (qpair->max_xfer_size/PAGE_SIZE)+1, PAGE_SIZE, 0,
	    NULL, NULL, &qpair->dma_tag);

	qpair->num_cmds = 0;
	qpair->num_intr_handler_calls = 0;

	/* TODO: error checking on contigmalloc, bus_dmamap_load calls */
	qpair->cmd = contigmalloc(qpair->num_entries *
	    sizeof(struct nvme_command), M_NVME, M_ZERO | M_NOWAIT,
	    0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
	qpair->cpl = contigmalloc(qpair->num_entries *
	    sizeof(struct nvme_completion), M_NVME, M_ZERO | M_NOWAIT,
	    0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);

	bus_dmamap_create(qpair->dma_tag, 0, &qpair->cmd_dma_map);
	bus_dmamap_create(qpair->dma_tag, 0, &qpair->cpl_dma_map);

	bus_dmamap_load(qpair->dma_tag, qpair->cmd_dma_map,
	    qpair->cmd, qpair->num_entries * sizeof(struct nvme_command),
	    nvme_single_map, &qpair->cmd_bus_addr, 0);
	bus_dmamap_load(qpair->dma_tag, qpair->cpl_dma_map,
	    qpair->cpl, qpair->num_entries * sizeof(struct nvme_completion),
	    nvme_single_map, &qpair->cpl_bus_addr, 0);

	qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[id].sq_tdbl);
	qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[id].cq_hdbl);

	TAILQ_INIT(&qpair->free_tr);
	TAILQ_INIT(&qpair->outstanding_tr);
	STAILQ_INIT(&qpair->queued_req);

	for (i = 0; i < qpair->num_trackers; i++) {
		tr = malloc(sizeof(*tr), M_NVME, M_ZERO | M_NOWAIT);

		if (tr == NULL) {
			printf("warning: nvme tracker malloc failed\n");
			break;
		}

		nvme_qpair_construct_tracker(qpair, tr, i);
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
	}

	qpair->act_tr = malloc(sizeof(struct nvme_tracker *) * qpair->num_entries,
	    M_NVME, M_ZERO | M_NOWAIT);
}

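/*
 * Common teardown for a queue pair: release its interrupt and DMA
 *  resources and free all trackers on the free list.
 */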
static void
nvme_qpair_destroy(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	if (qpair->tag)
		bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);

	if (qpair->res)
		bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(qpair->res), qpair->res);

	if (qpair->dma_tag)
		bus_dma_tag_destroy(qpair->dma_tag);

	if (qpair->act_tr)
		free(qpair->act_tr, M_NVME);

	while (!TAILQ_EMPTY(&qpair->free_tr)) {
		tr = TAILQ_FIRST(&qpair->free_tr);
		TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
		bus_dmamap_destroy(qpair->dma_tag, tr->payload_dma_map);
		bus_dmamap_destroy(qpair->dma_tag, tr->prp_dma_map);
		free(tr, M_NVME);
	}
}

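/*
 * Manually complete any outstanding asynchronous event request trackers
 *  on the admin queue with an ABORTED_SQ_DELETION status.
 */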
static void
nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	tr = TAILQ_FIRST(&qpair->outstanding_tr);
	while (tr != NULL) {
		if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
			nvme_qpair_manual_complete_tracker(qpair, tr,
			    NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION,
			    FALSE);
			tr = TAILQ_FIRST(&qpair->outstanding_tr);
		} else {
			tr = TAILQ_NEXT(tr, tailq);
		}
	}
}

void
nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_admin_qpair_abort_aers(qpair);

	/*
	 * For NVMe, you don't send delete queue commands for the admin
	 *  queue, so we just need to unload and free the cmd and cpl memory.
	 */
	bus_dmamap_unload(qpair->dma_tag, qpair->cmd_dma_map);
	bus_dmamap_destroy(qpair->dma_tag, qpair->cmd_dma_map);

	contigfree(qpair->cmd,
	    qpair->num_entries * sizeof(struct nvme_command), M_NVME);

	bus_dmamap_unload(qpair->dma_tag, qpair->cpl_dma_map);
	bus_dmamap_destroy(qpair->dma_tag, qpair->cpl_dma_map);
	contigfree(qpair->cpl,
	    qpair->num_entries * sizeof(struct nvme_completion), M_NVME);

	nvme_qpair_destroy(qpair);
}

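/*
 * Completion callbacks for the delete I/O submission/completion queue
 *  commands issued by nvme_io_qpair_destroy().  Each callback unloads and
 *  frees the corresponding ring and clears its pointer so the caller can
 *  tell the teardown has finished.
 */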
static void
nvme_free_cmd_ring(void *arg, const struct nvme_completion *status)
{
	struct nvme_qpair *qpair;

	qpair = (struct nvme_qpair *)arg;
	bus_dmamap_unload(qpair->dma_tag, qpair->cmd_dma_map);
	bus_dmamap_destroy(qpair->dma_tag, qpair->cmd_dma_map);
	contigfree(qpair->cmd,
	    qpair->num_entries * sizeof(struct nvme_command), M_NVME);
	qpair->cmd = NULL;
}

static void
nvme_free_cpl_ring(void *arg, const struct nvme_completion *status)
{
	struct nvme_qpair *qpair;

	qpair = (struct nvme_qpair *)arg;
	bus_dmamap_unload(qpair->dma_tag, qpair->cpl_dma_map);
	bus_dmamap_destroy(qpair->dma_tag, qpair->cpl_dma_map);
	contigfree(qpair->cpl,
	    qpair->num_entries * sizeof(struct nvme_completion), M_NVME);
	qpair->cpl = NULL;
}

void
nvme_io_qpair_destroy(struct nvme_qpair *qpair)
{
	struct nvme_controller *ctrlr = qpair->ctrlr;

	if (qpair->num_entries > 0) {

		nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair, nvme_free_cmd_ring,
		    qpair);
		/* Spin until free_cmd_ring sets qpair->cmd to NULL. */
		while (qpair->cmd)
			DELAY(5);

		nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair, nvme_free_cpl_ring,
		    qpair);
		/* Spin until free_cpl_ring sets qpair->cpl to NULL. */
		while (qpair->cpl)
			DELAY(5);

		nvme_qpair_destroy(qpair);
	}
}

static void
nvme_abort_complete(void *arg, const struct nvme_completion *status)
{
	struct nvme_tracker	*tr = arg;

	/*
	 * If cdw0 == 1, the controller was not able to abort the command
	 *  we requested.  We still need to check the active tracker array,
	 *  to cover the race where the I/O timed out at the same time the
	 *  controller was completing it.
	 */
	if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
		/*
		 * An I/O has timed out, and the controller was unable to
		 *  abort it for some reason.  Construct a fake completion
		 *  status, and then complete the I/O's tracker manually.
		 */
		printf("abort command failed, aborting command manually\n");
		nvme_qpair_manual_complete_tracker(tr->qpair, tr,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
	}
}

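/*
 * Called when a tracker's timeout callout fires.  If aborts are enabled
 *  and the controller is not reporting fatal status, issue an abort for
 *  the timed-out command; otherwise reset the controller.
 */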
static void
nvme_timeout(void *arg)
{
	struct nvme_tracker	*tr = arg;
	struct nvme_qpair	*qpair = tr->qpair;
	struct nvme_controller	*ctrlr = qpair->ctrlr;
	union csts_register	csts;

	/* Read csts to get value of cfs - controller fatal status. */
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (ctrlr->enable_aborts && csts.bits.cfs == 0) {
		/*
		 * If aborts are enabled, only use them if the controller is
		 *  not reporting fatal status.
		 */
		nvme_ctrlr_cmd_abort(ctrlr, tr->cid, qpair->id,
		    nvme_abort_complete, tr);
	} else
		nvme_ctrlr_reset(ctrlr);
}

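/*
 * Copy a tracker's command into the submission queue, arm the tracker's
 *  timeout if the request uses one, and ring the submission queue tail
 *  doorbell.  The qpair lock must be held.
 */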
void
nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
{
	struct nvme_request	*req;
	struct nvme_controller	*ctrlr;

	mtx_assert(&qpair->lock, MA_OWNED);

	req = tr->req;
	req->cmd.cid = tr->cid;
	qpair->act_tr[tr->cid] = tr;
	ctrlr = qpair->ctrlr;

	if (req->timeout)
#if __FreeBSD_version >= 800030
		callout_reset_curcpu(&tr->timer, ctrlr->timeout_period * hz,
		    nvme_timeout, tr);
#else
		callout_reset(&tr->timer, ctrlr->timeout_period * hz,
		    nvme_timeout, tr);
#endif

	/* Copy the command from the tracker to the submission queue. */
	memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));

	if (++qpair->sq_tail == qpair->num_entries)
		qpair->sq_tail = 0;

	wmb();
	nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].sq_tdbl,
	    qpair->sq_tail);

	qpair->num_cmds++;
}

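/*
 * Submit a request on a qpair whose lock is already held.  If no tracker
 *  is available or the qpair is disabled, queue the request for later
 *  submission; otherwise map the request's payload (if any) for DMA and
 *  submit the command.
 */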
static void
_nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{
	struct nvme_tracker	*tr;
	int			err;

	mtx_assert(&qpair->lock, MA_OWNED);

	tr = TAILQ_FIRST(&qpair->free_tr);

	if (tr == NULL || !qpair->is_enabled) {
		/*
		 * No tracker is available, or the qpair is disabled due to
		 *  an in-progress controller-level reset.
		 *
		 * Put the request on the qpair's request queue to be processed
		 *  when a tracker frees up via a command completion or when
		 *  the controller reset is completed.
		 */
		STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
		return;
	}

	TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
	TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
	tr->req = req;

	if (req->uio == NULL) {
		if (req->payload_size > 0) {
			err = bus_dmamap_load(tr->qpair->dma_tag,
					      tr->payload_dma_map, req->payload,
					      req->payload_size,
					      nvme_payload_map, tr, 0);
			if (err != 0)
				panic("bus_dmamap_load returned non-zero!\n");
		} else
			nvme_qpair_submit_tracker(tr->qpair, tr);
	} else {
		err = bus_dmamap_load_uio(tr->qpair->dma_tag,
					  tr->payload_dma_map, req->uio,
					  nvme_payload_map_uio, tr, 0);
		if (err != 0)
			panic("bus_dmamap_load_uio returned non-zero!\n");
	}
}

void
nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{

	mtx_lock(&qpair->lock);
	_nvme_qpair_submit_request(qpair, req);
	mtx_unlock(&qpair->lock);
}

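/*
 * (Re)initialize a qpair's software state: reset the ring indices and the
 *  expected phase bit and zero the command and completion rings.
 */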
static void
nvme_qpair_enable(struct nvme_qpair *qpair)
{

	qpair->is_enabled = TRUE;
	qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;

	/*
	 * First time through the completion queue, HW will set the phase
	 *  bit on completions to 1.  So set this to 1 here, indicating
	 *  we're looking for a 1 to know which entries have completed.
	 *  We'll toggle the expected bit each time the completion queue
	 *  rolls over.
	 */
	qpair->phase = 1;

	memset(qpair->cmd, 0,
	    qpair->num_entries * sizeof(struct nvme_command));
	memset(qpair->cpl, 0,
	    qpair->num_entries * sizeof(struct nvme_completion));
}

void
nvme_admin_qpair_enable(struct nvme_qpair *qpair)
{

	nvme_qpair_enable(qpair);
}

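/*
 * Re-enable an I/O qpair (e.g. after a controller reset): resubmit any
 *  trackers that were still outstanding, then resubmit the requests that
 *  were queued while the qpair was disabled.
 */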
void
nvme_io_qpair_enable(struct nvme_qpair *qpair)
{
	STAILQ_HEAD(, nvme_request)	temp;
	struct nvme_tracker		*tr;
	struct nvme_request		*req;

	mtx_lock(&qpair->lock);

	nvme_qpair_enable(qpair);

	TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq) {
		device_printf(qpair->ctrlr->dev,
		    "resubmitting outstanding i/o\n");
		nvme_dump_command(&tr->req->cmd);
		nvme_qpair_submit_tracker(qpair, tr);
	}

	STAILQ_INIT(&temp);
	STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request);

	while (!STAILQ_EMPTY(&temp)) {
		req = STAILQ_FIRST(&temp);
		STAILQ_REMOVE_HEAD(&temp, stailq);
		device_printf(qpair->ctrlr->dev,
		    "resubmitting queued i/o\n");
		nvme_dump_command(&req->cmd);
		_nvme_qpair_submit_request(qpair, req);
	}

	mtx_unlock(&qpair->lock);
}

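/*
 * Mark a qpair as disabled and stop the timeout callouts for all of its
 *  outstanding trackers.
 */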
static void
nvme_qpair_disable(struct nvme_qpair *qpair)
{
	struct nvme_tracker *tr;

	qpair->is_enabled = FALSE;
	mtx_lock(&qpair->lock);
	TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq)
		callout_stop(&tr->timer);
	mtx_unlock(&qpair->lock);
}

void
nvme_admin_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
	nvme_admin_qpair_abort_aers(qpair);
}

void
nvme_io_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
}