Deleted Added
full compact
nvme_qpair.c (248741) nvme_qpair.c (248746)
1/*-
2 * Copyright (C) 2012 Intel Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden (view full) ---

20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
1/*-
2 * Copyright (C) 2012 Intel Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden (view full) ---

20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/dev/nvme/nvme_qpair.c 248741 2013-03-26 18:45:16Z jimharris $");
28__FBSDID("$FreeBSD: head/sys/dev/nvme/nvme_qpair.c 248746 2013-03-26 19:50:46Z jimharris $");
29
30#include <sys/param.h>
31#include <sys/bus.h>
32
33#include <dev/pci/pcivar.h>
34
35#include "nvme_private.h"
36

--- 45 unchanged lines hidden (view full) ---

82 case NVME_SCT_COMMAND_SPECIFIC:
83 case NVME_SCT_MEDIA_ERROR:
84 case NVME_SCT_VENDOR_SPECIFIC:
85 default:
86 return (0);
87 }
88}
89
29
30#include <sys/param.h>
31#include <sys/bus.h>
32
33#include <dev/pci/pcivar.h>
34
35#include "nvme_private.h"
36

--- 45 unchanged lines hidden (view full) ---

82 case NVME_SCT_COMMAND_SPECIFIC:
83 case NVME_SCT_MEDIA_ERROR:
84 case NVME_SCT_VENDOR_SPECIFIC:
85 default:
86 return (0);
87 }
88}
89
90static struct nvme_tracker *
91nvme_qpair_find_tracker(struct nvme_qpair *qpair, struct nvme_request *req)
92{
93 struct nvme_tracker *tr;
94 uint32_t i;
95
96 KASSERT(req != NULL, ("%s: called with NULL req\n", __func__));
97
98 for (i = 0; i < qpair->num_entries; ++i) {
99 tr = qpair->act_tr[i];
100 if (tr != NULL && tr->req == req)
101 return (tr);
102 }
103
104 return (NULL);
105}
106
107static void
108nvme_qpair_construct_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
109 uint16_t cid)
110{
111
112 bus_dmamap_create(qpair->dma_tag, 0, &tr->payload_dma_map);
113 bus_dmamap_create(qpair->dma_tag, 0, &tr->prp_dma_map);
114

--- 27 unchanged lines hidden (view full) ---

142
143 if (req->cb_fn && !retry)
144 req->cb_fn(req->cb_arg, cpl);
145
146 mtx_lock(&qpair->lock);
147 callout_stop(&tr->timer);
148
149 if (retry)
90static void
91nvme_qpair_construct_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
92 uint16_t cid)
93{
94
95 bus_dmamap_create(qpair->dma_tag, 0, &tr->payload_dma_map);
96 bus_dmamap_create(qpair->dma_tag, 0, &tr->prp_dma_map);
97

--- 27 unchanged lines hidden (view full) ---

125
126 if (req->cb_fn && !retry)
127 req->cb_fn(req->cb_arg, cpl);
128
129 mtx_lock(&qpair->lock);
130 callout_stop(&tr->timer);
131
132 if (retry)
150 nvme_qpair_submit_cmd(qpair, tr);
133 nvme_qpair_submit_tracker(qpair, tr);
151 else {
152 if (req->payload_size > 0 || req->uio != NULL)
153 bus_dmamap_unload(qpair->dma_tag,
154 tr->payload_dma_map);
155
156 nvme_free_request(req);
157 tr->req = NULL;
158

--- 5 unchanged lines hidden (view full) ---

164 STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
165 _nvme_qpair_submit_request(qpair, req);
166 }
167 }
168
169 mtx_unlock(&qpair->lock);
170}
171
134 else {
135 if (req->payload_size > 0 || req->uio != NULL)
136 bus_dmamap_unload(qpair->dma_tag,
137 tr->payload_dma_map);
138
139 nvme_free_request(req);
140 tr->req = NULL;
141

--- 5 unchanged lines hidden (view full) ---

147 STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
148 _nvme_qpair_submit_request(qpair, req);
149 }
150 }
151
152 mtx_unlock(&qpair->lock);
153}
154
155static void
156nvme_qpair_manual_complete_tracker(struct nvme_qpair *qpair,
157 struct nvme_tracker *tr, uint32_t sct, uint32_t sc,
158 boolean_t print_on_error)
159{
160 struct nvme_completion cpl;
161
162 memset(&cpl, 0, sizeof(cpl));
163 cpl.sqid = qpair->id;
164 cpl.cid = tr->cid;
165 cpl.sf_sct = sct;
166 cpl.sf_sc = sc;
167 nvme_qpair_complete_tracker(qpair, tr, &cpl, print_on_error);
168}
169
172void
173nvme_qpair_process_completions(struct nvme_qpair *qpair)
174{
175 struct nvme_tracker *tr;
176 struct nvme_completion *cpl;
177
178 qpair->num_intr_handler_calls++;
179
170void
171nvme_qpair_process_completions(struct nvme_qpair *qpair)
172{
173 struct nvme_tracker *tr;
174 struct nvme_completion *cpl;
175
176 qpair->num_intr_handler_calls++;
177
178 if (!qpair->is_enabled)
179 /*
180 * qpair is not enabled, likely because a controller reset is
181 * is in progress. Ignore the interrupt - any I/O that was
182 * associated with this interrupt will get retried when the
183 * reset is complete.
184 */
185 return;
186
180 while (1) {
181 cpl = &qpair->cpl[qpair->cq_head];
182
183 if (cpl->p != qpair->phase)
184 break;
185
186 tr = qpair->act_tr[cpl->cid];
187

--- 43 unchanged lines hidden (view full) ---

231 */
232 if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
233 num_trackers = min(num_trackers, 64);
234#endif
235 qpair->num_trackers = num_trackers;
236 qpair->max_xfer_size = max_xfer_size;
237 qpair->ctrlr = ctrlr;
238
187 while (1) {
188 cpl = &qpair->cpl[qpair->cq_head];
189
190 if (cpl->p != qpair->phase)
191 break;
192
193 tr = qpair->act_tr[cpl->cid];
194

--- 43 unchanged lines hidden (view full) ---

238 */
239 if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
240 num_trackers = min(num_trackers, 64);
241#endif
242 qpair->num_trackers = num_trackers;
243 qpair->max_xfer_size = max_xfer_size;
244 qpair->ctrlr = ctrlr;
245
239 /*
240 * First time through the completion queue, HW will set phase
241 * bit on completions to 1. So set this to 1 here, indicating
242 * we're looking for a 1 to know which entries have completed.
243 * we'll toggle the bit each time when the completion queue
244 * rolls over.
245 */
246 qpair->phase = 1;
247
248 if (ctrlr->msix_enabled) {
249
250 /*
251 * MSI-X vector resource IDs start at 1, so we add one to
252 * the queue's vector to get the corresponding rid to use.
253 */
254 qpair->rid = vector + 1;
255

--- 10 unchanged lines hidden (view full) ---

266 bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
267 sizeof(uint64_t), PAGE_SIZE, BUS_SPACE_MAXADDR,
268 BUS_SPACE_MAXADDR, NULL, NULL, qpair->max_xfer_size,
269 (qpair->max_xfer_size/PAGE_SIZE)+1, PAGE_SIZE, 0,
270 NULL, NULL, &qpair->dma_tag);
271
272 qpair->num_cmds = 0;
273 qpair->num_intr_handler_calls = 0;
246 if (ctrlr->msix_enabled) {
247
248 /*
249 * MSI-X vector resource IDs start at 1, so we add one to
250 * the queue's vector to get the corresponding rid to use.
251 */
252 qpair->rid = vector + 1;
253

--- 10 unchanged lines hidden (view full) ---

264 bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
265 sizeof(uint64_t), PAGE_SIZE, BUS_SPACE_MAXADDR,
266 BUS_SPACE_MAXADDR, NULL, NULL, qpair->max_xfer_size,
267 (qpair->max_xfer_size/PAGE_SIZE)+1, PAGE_SIZE, 0,
268 NULL, NULL, &qpair->dma_tag);
269
270 qpair->num_cmds = 0;
271 qpair->num_intr_handler_calls = 0;
274 qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;
275
276 /* TODO: error checking on contigmalloc, bus_dmamap_load calls */
277 qpair->cmd = contigmalloc(qpair->num_entries *
278 sizeof(struct nvme_command), M_NVME, M_ZERO | M_NOWAIT,
279 0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
280 qpair->cpl = contigmalloc(qpair->num_entries *
281 sizeof(struct nvme_completion), M_NVME, M_ZERO | M_NOWAIT,
282 0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);

--- 53 unchanged lines hidden (view full) ---

336 tr = TAILQ_FIRST(&qpair->free_tr);
337 TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
338 bus_dmamap_destroy(qpair->dma_tag, tr->payload_dma_map);
339 bus_dmamap_destroy(qpair->dma_tag, tr->prp_dma_map);
340 free(tr, M_NVME);
341 }
342}
343
272
273 /* TODO: error checking on contigmalloc, bus_dmamap_load calls */
274 qpair->cmd = contigmalloc(qpair->num_entries *
275 sizeof(struct nvme_command), M_NVME, M_ZERO | M_NOWAIT,
276 0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
277 qpair->cpl = contigmalloc(qpair->num_entries *
278 sizeof(struct nvme_completion), M_NVME, M_ZERO | M_NOWAIT,
279 0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);

--- 53 unchanged lines hidden (view full) ---

333 tr = TAILQ_FIRST(&qpair->free_tr);
334 TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
335 bus_dmamap_destroy(qpair->dma_tag, tr->payload_dma_map);
336 bus_dmamap_destroy(qpair->dma_tag, tr->prp_dma_map);
337 free(tr, M_NVME);
338 }
339}
340
/*
 * Manually complete every outstanding Asynchronous Event Request on the
 * admin qpair with ABORTED - SQ DELETION status.
 *
 * Completing a tracker removes it from outstanding_tr (presumably, via
 * nvme_qpair_complete_tracker — TODO confirm), so after each manual
 * completion the walk restarts from the head of the list rather than
 * following a now-invalid tailq link.  Only non-AER entries advance via
 * TAILQ_NEXT.
 */
static void
nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair)
{
	struct nvme_tracker *tr;

	tr = TAILQ_FIRST(&qpair->outstanding_tr);
	while (tr != NULL) {
		if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
			/* AER: complete it and restart from the list head. */
			nvme_qpair_manual_complete_tracker(qpair, tr,
			    NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION,
			    FALSE);
			tr = TAILQ_FIRST(&qpair->outstanding_tr);
		} else {
			/* Not an AER: leave it alone, move to the next. */
			tr = TAILQ_NEXT(tr, tailq);
		}
	}
}
358
344void
345nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
346{
347
359void
360nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
361{
362
363 nvme_admin_qpair_abort_aers(qpair);
364
348 /*
349 * For NVMe, you don't send delete queue commands for the admin
350 * queue, so we just need to unload and free the cmd and cpl memory.
351 */
352 bus_dmamap_unload(qpair->dma_tag, qpair->cmd_dma_map);
353 bus_dmamap_destroy(qpair->dma_tag, qpair->cmd_dma_map);
354
355 contigfree(qpair->cmd,

--- 52 unchanged lines hidden (view full) ---

408 while (qpair->cpl)
409 DELAY(5);
410
411 nvme_qpair_destroy(qpair);
412 }
413}
414
415static void
365 /*
366 * For NVMe, you don't send delete queue commands for the admin
367 * queue, so we just need to unload and free the cmd and cpl memory.
368 */
369 bus_dmamap_unload(qpair->dma_tag, qpair->cmd_dma_map);
370 bus_dmamap_destroy(qpair->dma_tag, qpair->cmd_dma_map);
371
372 contigfree(qpair->cmd,

--- 52 unchanged lines hidden (view full) ---

425 while (qpair->cpl)
426 DELAY(5);
427
428 nvme_qpair_destroy(qpair);
429 }
430}
431
432static void
416nvme_qpair_manual_abort_tracker(struct nvme_qpair *qpair,
417 struct nvme_tracker *tr, uint32_t sct, uint32_t sc,
418 boolean_t print_on_error)
419{
420 struct nvme_completion cpl;
421
422 memset(&cpl, 0, sizeof(cpl));
423 cpl.sqid = qpair->id;
424 cpl.cid = tr->cid;
425 cpl.sf_sct = sct;
426 cpl.sf_sc = sc;
427 nvme_qpair_complete_tracker(qpair, tr, &cpl, print_on_error);
428}
429
430void
431nvme_qpair_manual_abort_request(struct nvme_qpair *qpair,
432 struct nvme_request *req, uint32_t sct, uint32_t sc,
433 boolean_t print_on_error)
434{
435 struct nvme_tracker *tr;
436
437 tr = nvme_qpair_find_tracker(qpair, req);
438
439 if (tr == NULL) {
440 printf("%s: request not found\n", __func__);
441 nvme_dump_command(&req->cmd);
442 return;
443 }
444
445 nvme_qpair_manual_abort_tracker(qpair, tr, sct, sc, print_on_error);
446}
447
448static void
449nvme_abort_complete(void *arg, const struct nvme_completion *status)
450{
451 struct nvme_tracker *tr = arg;
452
453 /*
454 * If cdw0 == 1, the controller was not able to abort the command
455 * we requested. We still need to check the active tracker array,
456 * to cover race where I/O timed out at same time controller was
457 * completing the I/O.
458 */
459 if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
460 /*
461 * An I/O has timed out, and the controller was unable to
462 * abort it for some reason. Construct a fake completion
463 * status, and then complete the I/O's tracker manually.
464 */
465 printf("abort command failed, aborting command manually\n");
433nvme_abort_complete(void *arg, const struct nvme_completion *status)
434{
435 struct nvme_tracker *tr = arg;
436
437 /*
438 * If cdw0 == 1, the controller was not able to abort the command
439 * we requested. We still need to check the active tracker array,
440 * to cover race where I/O timed out at same time controller was
441 * completing the I/O.
442 */
443 if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
444 /*
445 * An I/O has timed out, and the controller was unable to
446 * abort it for some reason. Construct a fake completion
447 * status, and then complete the I/O's tracker manually.
448 */
449 printf("abort command failed, aborting command manually\n");
466 nvme_qpair_manual_abort_tracker(tr->qpair, tr,
450 nvme_qpair_manual_complete_tracker(tr->qpair, tr,
467 NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
468 }
469}
470
/*
 * Callout handler fired when a submitted command's timeout expires
 * (armed in the submit path via callout_reset).  Issues an ABORT admin
 * command for the timed-out cid/sqid; the outcome is handled in
 * nvme_abort_complete().
 */
static void
nvme_timeout(void *arg)
{
	struct nvme_tracker *tr = arg;

	nvme_ctrlr_cmd_abort(tr->qpair->ctrlr, tr->cid, tr->qpair->id,
	    nvme_abort_complete, tr);
}
479
480void
451 NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
452 }
453}
454
455static void
456nvme_timeout(void *arg)
457{
458 struct nvme_tracker *tr = arg;
459
460 nvme_ctrlr_cmd_abort(tr->qpair->ctrlr, tr->cid, tr->qpair->id,
461 nvme_abort_complete, tr);
462}
463
464void
481nvme_qpair_submit_cmd(struct nvme_qpair *qpair, struct nvme_tracker *tr)
465nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
482{
483 struct nvme_request *req;
484
466{
467 struct nvme_request *req;
468
469 mtx_assert(&qpair->lock, MA_OWNED);
470
485 req = tr->req;
486 req->cmd.cid = tr->cid;
487 qpair->act_tr[tr->cid] = tr;
488
489 if (req->timeout > 0)
490#if __FreeBSD_version >= 800030
491 callout_reset_curcpu(&tr->timer, req->timeout * hz,
492 nvme_timeout, tr);

--- 19 unchanged lines hidden (view full) ---

512{
513 struct nvme_tracker *tr;
514 int err;
515
516 mtx_assert(&qpair->lock, MA_OWNED);
517
518 tr = TAILQ_FIRST(&qpair->free_tr);
519
471 req = tr->req;
472 req->cmd.cid = tr->cid;
473 qpair->act_tr[tr->cid] = tr;
474
475 if (req->timeout > 0)
476#if __FreeBSD_version >= 800030
477 callout_reset_curcpu(&tr->timer, req->timeout * hz,
478 nvme_timeout, tr);

--- 19 unchanged lines hidden (view full) ---

498{
499 struct nvme_tracker *tr;
500 int err;
501
502 mtx_assert(&qpair->lock, MA_OWNED);
503
504 tr = TAILQ_FIRST(&qpair->free_tr);
505
520 if (tr == NULL) {
506 if (tr == NULL || !qpair->is_enabled) {
521 /*
507 /*
522 * No tracker is available. Put the request on the qpair's
523 * request queue to be processed when a tracker frees up
524 * via a command completion.
508 * No tracker is available, or the qpair is disabled due to
509 * an in-progress controller-level reset.
510 *
511 * Put the request on the qpair's request queue to be processed
512 * when a tracker frees up via a command completion or when
513 * the controller reset is completed.
525 */
526 STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
527 return;
528 }
529
530 TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
531 TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
532 tr->req = req;
533
534 if (req->uio == NULL) {
535 if (req->payload_size > 0) {
536 err = bus_dmamap_load(tr->qpair->dma_tag,
537 tr->payload_dma_map, req->payload,
538 req->payload_size,
539 nvme_payload_map, tr, 0);
540 if (err != 0)
541 panic("bus_dmamap_load returned non-zero!\n");
542 } else
514 */
515 STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
516 return;
517 }
518
519 TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
520 TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
521 tr->req = req;
522
523 if (req->uio == NULL) {
524 if (req->payload_size > 0) {
525 err = bus_dmamap_load(tr->qpair->dma_tag,
526 tr->payload_dma_map, req->payload,
527 req->payload_size,
528 nvme_payload_map, tr, 0);
529 if (err != 0)
530 panic("bus_dmamap_load returned non-zero!\n");
531 } else
543 nvme_qpair_submit_cmd(tr->qpair, tr);
532 nvme_qpair_submit_tracker(tr->qpair, tr);
544 } else {
545 err = bus_dmamap_load_uio(tr->qpair->dma_tag,
546 tr->payload_dma_map, req->uio,
547 nvme_payload_map_uio, tr, 0);
548 if (err != 0)
549 panic("bus_dmamap_load returned non-zero!\n");
550 }
551}
552
/*
 * Public entry point for submitting a request to a qpair: takes the
 * qpair lock and defers to the locked worker _nvme_qpair_submit_request().
 */
void
nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{

	mtx_lock(&qpair->lock);
	_nvme_qpair_submit_request(qpair, req);
	mtx_unlock(&qpair->lock);
}
533 } else {
534 err = bus_dmamap_load_uio(tr->qpair->dma_tag,
535 tr->payload_dma_map, req->uio,
536 nvme_payload_map_uio, tr, 0);
537 if (err != 0)
538 panic("bus_dmamap_load returned non-zero!\n");
539 }
540}
541
542void
543nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
544{
545
546 mtx_lock(&qpair->lock);
547 _nvme_qpair_submit_request(qpair, req);
548 mtx_unlock(&qpair->lock);
549}
550
551static void
552nvme_qpair_enable(struct nvme_qpair *qpair)
553{
554
555 qpair->is_enabled = TRUE;
556 qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;
557
558 /*
559 * First time through the completion queue, HW will set phase
560 * bit on completions to 1. So set this to 1 here, indicating
561 * we're looking for a 1 to know which entries have completed.
562 * we'll toggle the bit each time when the completion queue
563 * rolls over.
564 */
565 qpair->phase = 1;
566
567 memset(qpair->cmd, 0,
568 qpair->num_entries * sizeof(struct nvme_command));
569 memset(qpair->cpl, 0,
570 qpair->num_entries * sizeof(struct nvme_completion));
571}
572
/*
 * Enable the admin qpair.  Currently just the common enable path; the
 * admin queue has no queued I/O to replay (contrast
 * nvme_io_qpair_enable()).
 */
void
nvme_admin_qpair_enable(struct nvme_qpair *qpair)
{

	nvme_qpair_enable(qpair);
}
579
/*
 * Enable an I/O qpair and replay work that accumulated while it was
 * disabled: re-submit every tracker still on outstanding_tr, then run
 * each request that was parked on queued_req through the normal
 * submission path.
 */
void
nvme_io_qpair_enable(struct nvme_qpair *qpair)
{
	STAILQ_HEAD(, nvme_request) temp;
	struct nvme_tracker *tr;
	struct nvme_request *req;

	mtx_lock(&qpair->lock);

	nvme_qpair_enable(qpair);

	/* Re-issue commands that were outstanding when the qpair went down. */
	TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq)
		nvme_qpair_submit_tracker(qpair, tr);

	/*
	 * Drain queued_req through a local list — presumably because
	 * _nvme_qpair_submit_request() can re-append to queued_req when no
	 * tracker is free, which would make draining the live list a
	 * potential infinite loop (TODO confirm).
	 */
	STAILQ_INIT(&temp);
	STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request);

	while (!STAILQ_EMPTY(&temp)) {
		req = STAILQ_FIRST(&temp);
		STAILQ_REMOVE_HEAD(&temp, stailq);
		_nvme_qpair_submit_request(qpair, req);
	}

	mtx_unlock(&qpair->lock);
}
605
/*
 * Disable a qpair (e.g. at the start of a controller reset): clear the
 * enabled flag so new submissions get parked on queued_req, then stop
 * the timeout callout of every outstanding tracker.
 */
static void
nvme_qpair_disable(struct nvme_qpair *qpair)
{
	struct nvme_tracker *tr;

	/*
	 * NOTE(review): is_enabled is cleared before qpair->lock is taken —
	 * presumably tolerable because submitters re-check under the lock,
	 * but confirm against _nvme_qpair_submit_request().
	 */
	qpair->is_enabled = FALSE;
	mtx_lock(&qpair->lock);
	TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq)
		callout_stop(&tr->timer);
	mtx_unlock(&qpair->lock);
}
617
/*
 * Disable the admin qpair, then manually complete any outstanding
 * Asynchronous Event Requests so they are not left pending across a
 * controller reset.
 */
void
nvme_admin_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
	nvme_admin_qpair_abort_aers(qpair);
}
625
/*
 * Disable an I/O qpair.  No extra work beyond the common disable path;
 * outstanding I/O is replayed later by nvme_io_qpair_enable().
 */
void
nvme_io_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
}