/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/nvme/nvme_qpair.c 248746 2013-03-26 19:50:46Z jimharris $");

#include <sys/param.h>
#include <sys/bus.h>

#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static void	_nvme_qpair_submit_request(struct nvme_qpair *qpair,
					   struct nvme_request *req);

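/*
 * A completion is an error if either its status code (SC) or status code
 *  type (SCT) field is non-zero; both are zero only on success.
 */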
static boolean_t
nvme_completion_is_error(struct nvme_completion *cpl)
{

	return (cpl->sf_sc != 0 || cpl->sf_sct != 0);
}

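/*
 * Decide whether a failed command is worth resubmitting.  Only a handful
 *  of generic status codes are treated as transient; all command-specific,
 *  media, and vendor-specific errors are reported to the caller.
 */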
static boolean_t
nvme_completion_is_retry(const struct nvme_completion *cpl)
{
	/*
	 * TODO: The spec is not clear on how commands that are aborted due
	 *  to TLER will be marked.  So for now, NAMESPACE_NOT_READY is the
	 *  only case where we should look at the DNR bit.
	 */
	switch (cpl->sf_sct) {
	case NVME_SCT_GENERIC:
		switch (cpl->sf_sc) {
		case NVME_SC_ABORTED_BY_REQUEST:
			return (1);
		case NVME_SC_NAMESPACE_NOT_READY:
			if (cpl->sf_dnr)
				return (0);
			else
				return (1);
		case NVME_SC_INVALID_OPCODE:
		case NVME_SC_INVALID_FIELD:
		case NVME_SC_COMMAND_ID_CONFLICT:
		case NVME_SC_DATA_TRANSFER_ERROR:
		case NVME_SC_ABORTED_POWER_LOSS:
		case NVME_SC_INTERNAL_DEVICE_ERROR:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
		case NVME_SC_COMMAND_SEQUENCE_ERROR:
		case NVME_SC_LBA_OUT_OF_RANGE:
		case NVME_SC_CAPACITY_EXCEEDED:
		default:
			return (0);
		}
	case NVME_SCT_COMMAND_SPECIFIC:
	case NVME_SCT_MEDIA_ERROR:
	case NVME_SCT_VENDOR_SPECIFIC:
	default:
		return (0);
	}
}

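/*
 * One-time setup for a tracker: create DMA maps for the payload and PRP
 *  list, wire the PRP list to its bus address, and attach the timeout
 *  callout to the qpair's lock.
 */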
static void
nvme_qpair_construct_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
    uint16_t cid)
{

	bus_dmamap_create(qpair->dma_tag, 0, &tr->payload_dma_map);
	bus_dmamap_create(qpair->dma_tag, 0, &tr->prp_dma_map);

	bus_dmamap_load(qpair->dma_tag, tr->prp_dma_map, tr->prp,
	    sizeof(tr->prp), nvme_single_map, &tr->prp_bus_addr, 0);

	callout_init_mtx(&tr->timer, &qpair->lock, 0);
	tr->cid = cid;
	tr->qpair = qpair;
}

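/*
 * Finish the command associated with a tracker.  Retryable errors are
 *  resubmitted directly; everything else invokes the caller's callback,
 *  frees the request, and recycles the tracker, at which point one
 *  queued request (if any) can be started in its place.
 */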
static void
nvme_qpair_complete_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
    struct nvme_completion *cpl, boolean_t print_on_error)
{
	struct nvme_request	*req;
	boolean_t		retry, error;

	req = tr->req;
	error = nvme_completion_is_error(cpl);
	retry = error && nvme_completion_is_retry(cpl);

	if (error && print_on_error) {
		nvme_dump_completion(cpl);
		nvme_dump_command(&req->cmd);
	}

	qpair->act_tr[cpl->cid] = NULL;

	KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n"));

	if (req->cb_fn && !retry)
		req->cb_fn(req->cb_arg, cpl);

	mtx_lock(&qpair->lock);
	callout_stop(&tr->timer);

	if (retry)
		nvme_qpair_submit_tracker(qpair, tr);
	else {
		if (req->payload_size > 0 || req->uio != NULL)
			bus_dmamap_unload(qpair->dma_tag,
			    tr->payload_dma_map);

		nvme_free_request(req);
		tr->req = NULL;

		TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq);
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);

		if (!STAILQ_EMPTY(&qpair->queued_req)) {
			req = STAILQ_FIRST(&qpair->queued_req);
			STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
			_nvme_qpair_submit_request(qpair, req);
		}
	}

	mtx_unlock(&qpair->lock);
}

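/*
 * Complete a tracker without a completion entry from the controller by
 *  synthesizing one from the given status code type and status code.
 */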
static void
nvme_qpair_manual_complete_tracker(struct nvme_qpair *qpair,
    struct nvme_tracker *tr, uint32_t sct, uint32_t sc,
    boolean_t print_on_error)
{
	struct nvme_completion	cpl;

	memset(&cpl, 0, sizeof(cpl));
	cpl.sqid = qpair->id;
	cpl.cid = tr->cid;
	cpl.sf_sct = sct;
	cpl.sf_sc = sc;
	nvme_qpair_complete_tracker(qpair, tr, &cpl, print_on_error);
}

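/*
 * Drain the completion queue.  An entry is new if its phase tag matches
 *  the phase the qpair expects; the expected phase flips every time the
 *  queue wraps.  The consumed head index is written back to the
 *  completion queue doorbell.
 */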
void
nvme_qpair_process_completions(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;
	struct nvme_completion	*cpl;

	qpair->num_intr_handler_calls++;

	if (!qpair->is_enabled)
		/*
		 * qpair is not enabled, likely because a controller reset
		 *  is in progress.  Ignore the interrupt - any I/O that was
		 *  associated with this interrupt will get retried when the
		 *  reset is complete.
		 */
		return;

	while (1) {
		cpl = &qpair->cpl[qpair->cq_head];

		if (cpl->p != qpair->phase)
			break;

		tr = qpair->act_tr[cpl->cid];

		if (tr != NULL) {
			nvme_qpair_complete_tracker(qpair, tr, cpl, TRUE);
			qpair->sq_head = cpl->sqhd;
		} else {
			printf("cpl does not map to outstanding cmd\n");
			nvme_dump_completion(cpl);
			KASSERT(0, ("received completion for unknown cmd\n"));
		}

		if (++qpair->cq_head == qpair->num_entries) {
			qpair->cq_head = 0;
			qpair->phase = !qpair->phase;
		}

		nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].cq_hdbl,
		    qpair->cq_head);
	}
}

static void
nvme_qpair_msix_handler(void *arg)
{
	struct nvme_qpair *qpair = arg;

	nvme_qpair_process_completions(qpair);
}

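/*
 * Set up everything a queue pair needs: its MSI-X interrupt (when
 *  enabled), DMA tag and maps, physically contiguous submission and
 *  completion rings, doorbell offsets, and tracker lists.
 */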
void
nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
    uint16_t vector, uint32_t num_entries, uint32_t num_trackers,
    uint32_t max_xfer_size, struct nvme_controller *ctrlr)
{
	struct nvme_tracker	*tr;
	uint32_t		i;

	qpair->id = id;
	qpair->vector = vector;
	qpair->num_entries = num_entries;
#ifdef CHATHAM2
	/*
	 * The Chatham prototype board starts having issues at higher queue
	 *  depths.  So use a conservative estimate here of no more than 64
	 *  outstanding I/O per queue at any one point.
	 */
	if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
		num_trackers = min(num_trackers, 64);
#endif
	qpair->num_trackers = num_trackers;
	qpair->max_xfer_size = max_xfer_size;
	qpair->ctrlr = ctrlr;

	if (ctrlr->msix_enabled) {

		/*
		 * MSI-X vector resource IDs start at 1, so we add one to
		 *  the queue's vector to get the corresponding rid to use.
		 */
		qpair->rid = vector + 1;

		qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
		    &qpair->rid, RF_ACTIVE);

		bus_setup_intr(ctrlr->dev, qpair->res,
		    INTR_TYPE_MISC | INTR_MPSAFE, NULL,
		    nvme_qpair_msix_handler, qpair, &qpair->tag);
	}

	mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);

	bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    sizeof(uint64_t), PAGE_SIZE, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, qpair->max_xfer_size,
	    (qpair->max_xfer_size/PAGE_SIZE)+1, PAGE_SIZE, 0,
	    NULL, NULL, &qpair->dma_tag);

	qpair->num_cmds = 0;
	qpair->num_intr_handler_calls = 0;

	/* TODO: error checking on contigmalloc, bus_dmamap_load calls */
	qpair->cmd = contigmalloc(qpair->num_entries *
	    sizeof(struct nvme_command), M_NVME, M_ZERO | M_NOWAIT,
	    0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
	qpair->cpl = contigmalloc(qpair->num_entries *
	    sizeof(struct nvme_completion), M_NVME, M_ZERO | M_NOWAIT,
	    0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);

	bus_dmamap_create(qpair->dma_tag, 0, &qpair->cmd_dma_map);
	bus_dmamap_create(qpair->dma_tag, 0, &qpair->cpl_dma_map);

	bus_dmamap_load(qpair->dma_tag, qpair->cmd_dma_map,
	    qpair->cmd, qpair->num_entries * sizeof(struct nvme_command),
	    nvme_single_map, &qpair->cmd_bus_addr, 0);
	bus_dmamap_load(qpair->dma_tag, qpair->cpl_dma_map,
	    qpair->cpl, qpair->num_entries * sizeof(struct nvme_completion),
	    nvme_single_map, &qpair->cpl_bus_addr, 0);

	qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[id].sq_tdbl);
	qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[id].cq_hdbl);

	TAILQ_INIT(&qpair->free_tr);
	TAILQ_INIT(&qpair->outstanding_tr);
	STAILQ_INIT(&qpair->queued_req);

	for (i = 0; i < qpair->num_trackers; i++) {
		tr = malloc(sizeof(*tr), M_NVME, M_ZERO | M_NOWAIT);

		if (tr == NULL) {
			printf("warning: nvme tracker malloc failed\n");
			break;
		}

		nvme_qpair_construct_tracker(qpair, tr, i);
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
	}

	qpair->act_tr = malloc(sizeof(struct nvme_tracker *) *
	    qpair->num_entries, M_NVME, M_ZERO | M_NOWAIT);
}

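/*
 * Teardown common to admin and I/O queue pairs: release the interrupt
 *  and DMA resources and free the trackers on the free list.
 */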
static void
nvme_qpair_destroy(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	if (qpair->tag)
		bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);

	if (qpair->res)
		bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(qpair->res), qpair->res);

	if (qpair->act_tr)
		free(qpair->act_tr, M_NVME);

	while (!TAILQ_EMPTY(&qpair->free_tr)) {
		tr = TAILQ_FIRST(&qpair->free_tr);
		TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
		bus_dmamap_destroy(qpair->dma_tag, tr->payload_dma_map);
		bus_dmamap_destroy(qpair->dma_tag, tr->prp_dma_map);
		free(tr, M_NVME);
	}

	/*
	 * Destroy the tag only after all maps created from it have been
	 *  destroyed - bus_dma_tag_destroy() fails with EBUSY while maps
	 *  are still outstanding.
	 */
	if (qpair->dma_tag)
		bus_dma_tag_destroy(qpair->dma_tag);
}

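/*
 * Manually abort any outstanding asynchronous event requests.  The
 *  controller holds AERs until an event occurs, so they must be
 *  completed manually when the admin queue is disabled or destroyed.
 */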
static void
nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	tr = TAILQ_FIRST(&qpair->outstanding_tr);
	while (tr != NULL) {
		if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
			nvme_qpair_manual_complete_tracker(qpair, tr,
			    NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION,
			    FALSE);
			tr = TAILQ_FIRST(&qpair->outstanding_tr);
		} else {
			tr = TAILQ_NEXT(tr, tailq);
		}
	}
}

void
nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_admin_qpair_abort_aers(qpair);

	/*
	 * For NVMe, you don't send delete queue commands for the admin
	 *  queue, so we just need to unload and free the cmd and cpl memory.
	 */
	bus_dmamap_unload(qpair->dma_tag, qpair->cmd_dma_map);
	bus_dmamap_destroy(qpair->dma_tag, qpair->cmd_dma_map);

	contigfree(qpair->cmd,
	    qpair->num_entries * sizeof(struct nvme_command), M_NVME);

	bus_dmamap_unload(qpair->dma_tag, qpair->cpl_dma_map);
	bus_dmamap_destroy(qpair->dma_tag, qpair->cpl_dma_map);
	contigfree(qpair->cpl,
	    qpair->num_entries * sizeof(struct nvme_completion), M_NVME);

	nvme_qpair_destroy(qpair);
}

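/*
 * Completion callbacks for the delete queue commands issued by
 *  nvme_io_qpair_destroy().  Once the controller has deleted a queue,
 *  its ring is unmapped and freed, and the ring pointer is cleared to
 *  release the spinning destroy routine.
 */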
static void
nvme_free_cmd_ring(void *arg, const struct nvme_completion *status)
{
	struct nvme_qpair *qpair;

	qpair = (struct nvme_qpair *)arg;
	bus_dmamap_unload(qpair->dma_tag, qpair->cmd_dma_map);
	bus_dmamap_destroy(qpair->dma_tag, qpair->cmd_dma_map);
	contigfree(qpair->cmd,
	    qpair->num_entries * sizeof(struct nvme_command), M_NVME);
	qpair->cmd = NULL;
}

static void
nvme_free_cpl_ring(void *arg, const struct nvme_completion *status)
{
	struct nvme_qpair *qpair;

	qpair = (struct nvme_qpair *)arg;
	bus_dmamap_unload(qpair->dma_tag, qpair->cpl_dma_map);
	bus_dmamap_destroy(qpair->dma_tag, qpair->cpl_dma_map);
	contigfree(qpair->cpl,
	    qpair->num_entries * sizeof(struct nvme_completion), M_NVME);
	qpair->cpl = NULL;
}

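/*
 * Unlike the admin queue, I/O submission and completion queues must be
 *  deleted via explicit commands to the controller.  The rings are freed
 *  from those commands' completion callbacks, which we spin on below.
 */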
void
nvme_io_qpair_destroy(struct nvme_qpair *qpair)
{
	struct nvme_controller *ctrlr = qpair->ctrlr;

	if (qpair->num_entries > 0) {

		nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair, nvme_free_cmd_ring,
		    qpair);
		/* Spin until free_cmd_ring sets qpair->cmd to NULL. */
		while (qpair->cmd)
			DELAY(5);

		nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair, nvme_free_cpl_ring,
		    qpair);
		/* Spin until free_cpl_ring sets qpair->cpl to NULL. */
		while (qpair->cpl)
			DELAY(5);

		nvme_qpair_destroy(qpair);
	}
}

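/*
 * Completion handler for the abort command issued by nvme_timeout().
 */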
static void
nvme_abort_complete(void *arg, const struct nvme_completion *status)
{
	struct nvme_tracker	*tr = arg;

	/*
	 * If cdw0 == 1, the controller was not able to abort the command
	 *  we requested.  We still need to check the active tracker array,
	 *  to cover the race where the I/O timed out at the same time the
	 *  controller was completing it.
	 */
	if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
		/*
		 * An I/O has timed out, and the controller was unable to
		 *  abort it for some reason.  Construct a fake completion
		 *  status, and then complete the I/O's tracker manually.
		 */
		printf("abort command failed, aborting command manually\n");
		nvme_qpair_manual_complete_tracker(tr->qpair, tr,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
	}
}

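/*
 * Callout handler armed at submission time: if a command does not
 *  complete within its timeout, ask the controller to abort it.
 */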
static void
nvme_timeout(void *arg)
{
	struct nvme_tracker	*tr = arg;

	nvme_ctrlr_cmd_abort(tr->qpair->ctrlr, tr->cid, tr->qpair->id,
	    nvme_abort_complete, tr);
}

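/*
 * Copy a tracker's command into the submission ring and ring the tail
 *  doorbell, arming the timeout callout first.  The write barrier
 *  ensures the command is visible in host memory before the controller
 *  observes the doorbell write.
 */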
void
nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
{
	struct nvme_request	*req;

	mtx_assert(&qpair->lock, MA_OWNED);

	req = tr->req;
	req->cmd.cid = tr->cid;
	qpair->act_tr[tr->cid] = tr;

	if (req->timeout > 0)
#if __FreeBSD_version >= 800030
		callout_reset_curcpu(&tr->timer, req->timeout * hz,
		    nvme_timeout, tr);
#else
		callout_reset(&tr->timer, req->timeout * hz, nvme_timeout, tr);
#endif

	/* Copy the command from the tracker to the submission queue. */
	memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));

	if (++qpair->sq_tail == qpair->num_entries)
		qpair->sq_tail = 0;

	wmb();
	nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].sq_tdbl,
	    qpair->sq_tail);

	qpair->num_cmds++;
}

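/*
 * Submit a request with the qpair lock held: allocate a tracker and
 *  DMA-map the payload or uio if there is one (the map callback is then
 *  expected to submit the tracker once the mapping is in place).
 *  Requests that cannot be started immediately go on the queued_req
 *  list instead.
 */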
static void
_nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{
	struct nvme_tracker	*tr;
	int			err;

	mtx_assert(&qpair->lock, MA_OWNED);

	tr = TAILQ_FIRST(&qpair->free_tr);

	if (tr == NULL || !qpair->is_enabled) {
		/*
		 * No tracker is available, or the qpair is disabled due to
		 *  an in-progress controller-level reset.
		 *
		 * Put the request on the qpair's request queue to be
		 *  processed when a tracker frees up via a command
		 *  completion or when the controller reset is completed.
		 */
		STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
		return;
	}

	TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
	TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
	tr->req = req;

	if (req->uio == NULL) {
		if (req->payload_size > 0) {
			err = bus_dmamap_load(tr->qpair->dma_tag,
			    tr->payload_dma_map, req->payload,
			    req->payload_size,
			    nvme_payload_map, tr, 0);
			if (err != 0)
				panic("bus_dmamap_load returned non-zero!\n");
		} else
			nvme_qpair_submit_tracker(tr->qpair, tr);
	} else {
		err = bus_dmamap_load_uio(tr->qpair->dma_tag,
		    tr->payload_dma_map, req->uio,
		    nvme_payload_map_uio, tr, 0);
		if (err != 0)
			panic("bus_dmamap_load_uio returned non-zero!\n");
	}
}

void
nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{

	mtx_lock(&qpair->lock);
	_nvme_qpair_submit_request(qpair, req);
	mtx_unlock(&qpair->lock);
}

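/*
 * Return a qpair's software state to that of a freshly created hardware
 *  queue: head/tail indices and the expected phase are reset and both
 *  rings are zeroed.
 */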
static void
nvme_qpair_enable(struct nvme_qpair *qpair)
{

	qpair->is_enabled = TRUE;
	qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;

	/*
	 * First time through the completion queue, HW will set the phase
	 *  bit on completions to 1.  So set this to 1 here, indicating
	 *  we're looking for a 1 to know which entries have completed.
	 *  We'll toggle the bit each time the completion queue rolls over.
	 */
	qpair->phase = 1;

	memset(qpair->cmd, 0,
	    qpair->num_entries * sizeof(struct nvme_command));
	memset(qpair->cpl, 0,
	    qpair->num_entries * sizeof(struct nvme_completion));
}

void
nvme_admin_qpair_enable(struct nvme_qpair *qpair)
{

	nvme_qpair_enable(qpair);
}

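/*
 * Re-enable an I/O qpair after a controller reset: resubmit every
 *  command that was outstanding when the queue was disabled, then drain
 *  the requests that were queued while it was down.
 */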
void
nvme_io_qpair_enable(struct nvme_qpair *qpair)
{
	STAILQ_HEAD(, nvme_request)	temp;
	struct nvme_tracker		*tr;
	struct nvme_request		*req;

	mtx_lock(&qpair->lock);

	nvme_qpair_enable(qpair);

	TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq)
		nvme_qpair_submit_tracker(qpair, tr);

	STAILQ_INIT(&temp);
	STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request);

	while (!STAILQ_EMPTY(&temp)) {
		req = STAILQ_FIRST(&temp);
		STAILQ_REMOVE_HEAD(&temp, stailq);
		_nvme_qpair_submit_request(qpair, req);
	}

	mtx_unlock(&qpair->lock);
}

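/*
 * Mark the qpair disabled, so new submissions are queued rather than
 *  started, and stop the timeout callouts for outstanding commands.
 */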
static void
nvme_qpair_disable(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	qpair->is_enabled = FALSE;
	mtx_lock(&qpair->lock);
	TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq)
		callout_stop(&tr->timer);
	mtx_unlock(&qpair->lock);
}

void
nvme_admin_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
	nvme_admin_qpair_abort_aers(qpair);
}

void
nvme_io_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
}