nvme_ctrlr.c revision 346246
/*-
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/nvme/nvme_ctrlr.c 346246 2019-04-15 16:57:27Z mav $");

#include "opt_cam.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/uio.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

#define B4_CHK_RDY_DELAY_MS	2300		/* work around controller bug */

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
						struct nvme_async_event_request *aer);
static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);

static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

	ctrlr->resource_id = PCIR_BAR(0);

	ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->resource_id, RF_ACTIVE);

	if (ctrlr->resource == NULL) {
		nvme_printf(ctrlr, "unable to allocate pci resource\n");
		return (ENOMEM);
	}

	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed behind
	 * BAR 4/5, separate from the control/doorbell registers.  Always
	 * try to map this bar, because it must be mapped prior to calling
	 * pci_alloc_msix().  If the table isn't behind BAR 4/5,
	 * bus_alloc_resource() will just return NULL which is OK.
	 */
	ctrlr->bar4_resource_id = PCIR_BAR(4);
	ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->bar4_resource_id, RF_ACTIVE);

	return (0);
}

static int
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
	struct nvme_qpair	*qpair;
	uint32_t		num_entries;
	int			error;

	qpair = &ctrlr->adminq;

	num_entries = NVME_ADMIN_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
	/*
	 * If admin_entries was overridden to an invalid value, revert it
	 * back to our default value.
	 */
	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
		    "specified\n", num_entries);
		num_entries = NVME_ADMIN_ENTRIES;
	}

	/*
	 * The admin queue's max xfer size is treated differently than the
	 * max I/O xfer size.  16KB is sufficient here - maybe even less?
	 */
	error = nvme_qpair_construct(qpair,
	    0, /* qpair ID */
	    0, /* vector */
	    num_entries,
	    NVME_ADMIN_TRACKERS,
	    ctrlr);
	return (error);
}

static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_qpair	*qpair;
	union cap_lo_register	cap_lo;
	int			i, error, num_entries, num_trackers;

	num_entries = NVME_IO_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);

	/*
	 * NVMe spec sets a hard limit of 64K max entries, but
	 * devices may specify a smaller limit, so we need to check
	 * the MQES field in the capabilities register.
	 */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	num_entries = min(num_entries, cap_lo.bits.mqes+1);

	num_trackers = NVME_IO_TRACKERS;
	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
	/*
	 * No need to have more trackers than entries in the submit queue.
	 * Note also that for a queue size of N, we can only have (N-1)
	 * commands outstanding, hence the "-1" here.
	 */
	num_trackers = min(num_trackers, (num_entries-1));

	/*
	 * Our best estimate for the maximum number of I/Os that we should
	 * normally have in flight at one time.  This should be viewed as a hint,
	 * not a hard limit, and will need to be revisited when the upper layers
	 * of the storage system grow multi-queue support.
	 */
	ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;

	/*
	 * This was calculated previously when setting up interrupts, but
	 * a controller could theoretically support fewer I/O queues than
	 * MSI-X vectors.  So calculate again here just to be safe.
	 */
	ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);

	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
	    M_NVME, M_ZERO | M_WAITOK);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		/*
		 * Admin queue has ID=0.  IO queues start at ID=1 -
		 * hence the 'i+1' here.
		 *
		 * For I/O queues, use the controller-wide max_xfer_size
		 * calculated in nvme_attach().
		 */
		error = nvme_qpair_construct(qpair,
		    i+1, /* qpair ID */
		    ctrlr->msix_enabled ? i+1 : 0, /* vector */
		    num_entries,
		    num_trackers,
		    ctrlr);
		if (error)
			return (error);

		/*
		 * Do not bother binding interrupts if we only have one I/O
		 * interrupt thread for this controller.
		 */
		if (ctrlr->num_io_queues > 1)
			bus_bind_intr(ctrlr->dev, qpair->res,
			    i * ctrlr->num_cpus_per_ioq);
	}

	return (0);
}

static void
nvme_ctrlr_fail(struct nvme_controller *ctrlr)
{
	int i;

	ctrlr->is_failed = TRUE;
	nvme_qpair_fail(&ctrlr->adminq);
	if (ctrlr->ioq != NULL) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_qpair_fail(&ctrlr->ioq[i]);
	}
	nvme_notify_fail_consumers(ctrlr);
}

void
nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	mtx_lock(&ctrlr->lock);
	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
	mtx_unlock(&ctrlr->lock);
	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
}

static void
nvme_ctrlr_fail_req_task(void *arg, int pending)
{
	struct nvme_controller	*ctrlr = arg;
	struct nvme_request	*req;

	mtx_lock(&ctrlr->lock);
	while ((req = STAILQ_FIRST(&ctrlr->fail_req)) != NULL) {
		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
		mtx_unlock(&ctrlr->lock);
		nvme_qpair_manual_complete_request(req->qpair, req,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
		mtx_lock(&ctrlr->lock);
	}
	mtx_unlock(&ctrlr->lock);
}

/*
 * Poll CSTS.RDY in 1ms steps until it reaches desired_val, or give up
 * once the ready timeout derived from CAP.TO has elapsed.
 */
static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
{
	int ms_waited;
	union csts_register csts;

	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	ms_waited = 0;
	while (csts.bits.rdy != desired_val) {
		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
			nvme_printf(ctrlr, "controller ready did not become %d "
			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
			return (ENXIO);
		}
		DELAY(1000);
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}

	return (0);
}

static int
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;
	int err;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	/*
	 * Per section 3.1.5 of the NVMe 1.3 spec, transitioning CC.EN from 0 to 1
	 * when CSTS.RDY is 1, or transitioning CC.EN from 1 to 0 when
	 * CSTS.RDY is 0, "has undefined results".  So make sure that CSTS.RDY
	 * isn't the desired value.  Short circuit if we're already disabled.
	 */
	if (cc.bits.en == 1) {
		if (csts.bits.rdy == 0) {
			/* EN == 1, wait for RDY == 1 or fail */
			err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
			if (err != 0)
				return (err);
		}
	} else {
		/* EN == 0 already; wait for RDY == 0 */
		if (csts.bits.rdy == 0)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
	}

	cc.bits.en = 0;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	/*
	 * Some drives have issues with accessing the mmio after we
	 * disable, so delay for a bit after we write the bit to
	 * cope with these issues.
	 */
	if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
		pause("nvmeR", B4_CHK_RDY_DELAY_MS * hz / 1000);
	return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
}

static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
	union cc_register	cc;
	union csts_register	csts;
	union aqa_register	aqa;
	int			err;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	/*
	 * See note in nvme_ctrlr_disable.  Short circuit if we're already enabled.
	 */
	if (cc.bits.en == 1) {
		if (csts.bits.rdy == 1)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
	} else {
		/* EN == 0 already; wait for RDY == 0 or fail */
		err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
		if (err != 0)
			return (err);
	}

	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
	DELAY(5000);
	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
	DELAY(5000);

	aqa.raw = 0;
	/* acqs and asqs are 0-based. */
	aqa.bits.acqs = ctrlr->adminq.num_entries-1;
	aqa.bits.asqs = ctrlr->adminq.num_entries-1;
	nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
	DELAY(5000);

	cc.bits.en = 1;
	cc.bits.css = 0;
	cc.bits.ams = 0;
	cc.bits.shn = 0;
	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

	/* This evaluates to 0, which is according to spec. */
	cc.bits.mps = (PAGE_SIZE >> 13);

	nvme_mmio_write_4(ctrlr, cc, cc.raw);

	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}

int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
{
	int i, err;

	nvme_admin_qpair_disable(&ctrlr->adminq);
	/*
	 * I/O queues are not allocated before the initial HW
	 * reset, so do not try to disable them.  Use is_initialized
	 * to determine if this is the initial HW reset.
	 */
	if (ctrlr->is_initialized) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_disable(&ctrlr->ioq[i]);
	}

	DELAY(100*1000);

	err = nvme_ctrlr_disable(ctrlr);
	if (err != 0)
		return err;
	return (nvme_ctrlr_enable(ctrlr));
}

void
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{
	int cmpset;

	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

	if (cmpset == 0 || ctrlr->is_failed)
		/*
		 * Controller is already resetting or has failed.  Return
		 * immediately since there is no need to kick off another
		 * reset in these cases.
		 */
		return;

	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}

static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;

	status.done = 0;
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_completion_poll_cb, &status);
	while (!atomic_load_acq_int(&status.done))
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
		return (ENXIO);
	}

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 * controller supports.
	 */
	if (ctrlr->cdata.mdts > 0)
		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));

	return (0);
}

static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	int					cq_allocated, sq_allocated;

	status.done = 0;
	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
	    nvme_completion_poll_cb, &status);
	while (!atomic_load_acq_int(&status.done))
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
		return (ENXIO);
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (status.cpl.cdw0 >> 16) + 1;

	/*
	 * Controller may allocate more queues than we requested,
	 * so use the minimum of the number requested and what was
	 * actually allocated.
	 */
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);

	return (0);
}

static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	struct nvme_qpair			*qpair;
	int					i;

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = 0;
		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
		    nvme_completion_poll_cb, &status);
		while (!atomic_load_acq_int(&status.done))
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
			return (ENXIO);
		}

		status.done = 0;
		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		while (!atomic_load_acq_int(&status.done))
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
			return (ENXIO);
		}
	}

	return (0);
}

static int
nvme_ctrlr_destroy_qpair(struct nvme_controller *ctrlr, struct nvme_qpair *qpair)
{
	struct nvme_completion_poll_status	status;

	status.done = 0;
	nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
	    nvme_completion_poll_cb, &status);
	while (!atomic_load_acq_int(&status.done))
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
		return (ENXIO);
	}

	status.done = 0;
	nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
	    nvme_completion_poll_cb, &status);
	while (!atomic_load_acq_int(&status.done))
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
		return (ENXIO);
	}

	return (0);
}

static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
	struct nvme_namespace	*ns;
	uint32_t		i;

	for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
		ns = &ctrlr->ns[i];
		nvme_ns_construct(ns, i+1, ctrlr);
	}

	return (0);
}

static boolean_t
is_log_page_id_valid(uint8_t page_id)
{

	switch (page_id) {
	case NVME_LOG_ERROR:
	case NVME_LOG_HEALTH_INFORMATION:
	case NVME_LOG_FIRMWARE_SLOT:
		return (TRUE);
	}

	return (FALSE);
}

static uint32_t
nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
{
	uint32_t	log_page_size;

	switch (page_id) {
	case NVME_LOG_ERROR:
		log_page_size = min(
		    sizeof(struct nvme_error_information_entry) *
		    ctrlr->cdata.elpe,
		    NVME_MAX_AER_LOG_SIZE);
		break;
	case NVME_LOG_HEALTH_INFORMATION:
		log_page_size = sizeof(struct nvme_health_information_page);
		break;
	case NVME_LOG_FIRMWARE_SLOT:
		log_page_size = sizeof(struct nvme_firmware_page);
		break;
	default:
		log_page_size = 0;
		break;
	}

	return (log_page_size);
}

static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
    union nvme_critical_warning_state state)
{

	if (state.bits.available_spare == 1)
		nvme_printf(ctrlr, "available spare space below threshold\n");

	if (state.bits.temperature == 1)
		nvme_printf(ctrlr, "temperature above threshold\n");

	if (state.bits.device_reliability == 1)
		nvme_printf(ctrlr, "device reliability degraded\n");

	if (state.bits.read_only == 1)
		nvme_printf(ctrlr, "media placed in read only mode\n");

	if (state.bits.volatile_memory_backup == 1)
		nvme_printf(ctrlr, "volatile memory backup device failed\n");

	if (state.bits.reserved != 0)
		nvme_printf(ctrlr,
		    "unknown critical warning(s): state = 0x%02x\n", state.raw);
}

static void
nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request		*aer = arg;
	struct nvme_health_information_page	*health_info;

	/*
	 * If the log page fetch for some reason completed with an error,
	 * don't pass log page data to the consumers.  In practice, this case
	 * should never happen.
	 */
	if (nvme_completion_is_error(cpl))
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, NULL, 0);
	else {
		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
			health_info = (struct nvme_health_information_page *)
			    aer->log_page_buffer;
			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
			    health_info->critical_warning);
			/*
			 * Critical warnings reported through the
			 * SMART/health log page are persistent, so
			 * clear the associated bits in the async event
			 * config so that we do not receive repeated
			 * notifications for the same event.
			 */
			aer->ctrlr->async_event_config.raw &=
			    ~health_info->critical_warning.raw;
			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
			    aer->ctrlr->async_event_config, NULL, NULL);
		}

		/*
		 * Pass the cpl data from the original async event completion,
		 * not the log page fetch.
		 */
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
	}

	/*
	 * Repost another asynchronous event request to replace the one
	 * that just completed.
	 */
	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
}

static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request	*aer = arg;

	if (nvme_completion_is_error(cpl)) {
		/*
		 * Do not retry failed async event requests.  This avoids
		 * infinite loops where a new async event request is submitted
		 * to replace the one just failed, only to fail again and
		 * perpetuate the loop.
		 */
		return;
	}

	/* Associated log page is in bits 23:16 of completion entry dw0. */
	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;

	nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
	    aer->log_page_id);

	if (is_log_page_id_valid(aer->log_page_id)) {
		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
		    aer->log_page_id);
		memcpy(&aer->cpl, cpl, sizeof(*cpl));
		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
		    aer);
		/* Wait to notify consumers until after log page is fetched. */
	} else {
		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
		    NULL, 0);

		/*
		 * Repost another asynchronous event request to replace the one
		 * that just completed.
		 */
		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
	}
}

static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
	struct nvme_request *req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
	aer->req = req;

	/*
	 * Disable timeout here, since asynchronous event requests should by
	 * nature never be timed out.
	 */
	req->timeout = FALSE;
	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	struct nvme_async_event_request		*aer;
	uint32_t				i;

	ctrlr->async_event_config.raw = 0xFF;
	ctrlr->async_event_config.bits.reserved = 0;

	status.done = 0;
	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
	    0, NULL, 0, nvme_completion_poll_cb, &status);
	while (!atomic_load_acq_int(&status.done))
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl) ||
	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
		nvme_printf(ctrlr, "temperature threshold not supported\n");
		ctrlr->async_event_config.bits.temperature = 0;
	}

	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
	    ctrlr->async_event_config, NULL, NULL);

	/* aerl is a zero-based value, so we need to add 1 here. */
	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));

	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
	}
}

static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

	ctrlr->int_coal_time = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
	    &ctrlr->int_coal_time);

	ctrlr->int_coal_threshold = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
	    &ctrlr->int_coal_threshold);

	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
	    ctrlr->int_coal_threshold, NULL, NULL);
}

static void
nvme_ctrlr_start(void *ctrlr_arg)
{
	struct nvme_controller *ctrlr = ctrlr_arg;
	uint32_t old_num_io_queues;
	int i;

	/*
	 * Only reset adminq here when we are restarting the
	 * controller after a reset.  During initialization,
	 * we have already submitted admin commands to get
	 * the number of I/O queues supported, so cannot reset
	 * the adminq again here.
	 */
	if (ctrlr->is_resetting) {
		nvme_qpair_reset(&ctrlr->adminq);
	}

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_reset(&ctrlr->ioq[i]);

	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_identify(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	/*
	 * The number of qpairs is determined during controller initialization,
	 * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
	 * HW limit.  We call SET_FEATURES again here so that it gets called
	 * after any reset for controllers that depend on the driver to
	 * explicitly specify how many queues it will use.  This value should
	 * never change between resets, so panic if somehow that does happen.
	 */
	if (ctrlr->is_resetting) {
		old_num_io_queues = ctrlr->num_io_queues;
		if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
			nvme_ctrlr_fail(ctrlr);
			return;
		}

		if (old_num_io_queues != ctrlr->num_io_queues) {
			panic("num_io_queues changed from %u to %u",
			    old_num_io_queues, ctrlr->num_io_queues);
		}
	}

	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	nvme_ctrlr_configure_aer(ctrlr);
	nvme_ctrlr_configure_int_coalescing(ctrlr);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_enable(&ctrlr->ioq[i]);
}

void
nvme_ctrlr_start_config_hook(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_qpair_reset(&ctrlr->adminq);
	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	nvme_sysctl_initialize_ctrlr(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	ctrlr->is_initialized = 1;
	nvme_notify_new_controller(ctrlr);
}

static void
nvme_ctrlr_reset_task(void *arg, int pending)
{
	struct nvme_controller	*ctrlr = arg;
	int			status;

	nvme_printf(ctrlr, "resetting controller\n");
	status = nvme_ctrlr_hw_reset(ctrlr);
	/*
	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
	 * handlers on this CPU that were blocked on a qpair lock.  We want
	 * all nvme interrupts completed before proceeding with restarting the
	 * controller.
	 *
	 * XXX - any way to guarantee the interrupt handlers have quiesced?
	 */
	pause("nvmereset", hz / 10);
	if (status == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
}

/*
 * Poll all the queues enabled on the device for completion.
 */
void
nvme_ctrlr_poll(struct nvme_controller *ctrlr)
{
	int i;

	nvme_qpair_process_completions(&ctrlr->adminq);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		if (ctrlr->ioq && ctrlr->ioq[i].cpl)
			nvme_qpair_process_completions(&ctrlr->ioq[i]);
}

/*
 * Poll the single-vector interrupt case: num_io_queues will be 1 and
 * there's only a single vector.  While we're polling, we mask further
 * interrupts in the controller.
 */
void
nvme_ctrlr_intx_handler(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_mmio_write_4(ctrlr, intms, 1);
	nvme_ctrlr_poll(ctrlr);
	nvme_mmio_write_4(ctrlr, intmc, 1);
}

static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

	ctrlr->msix_enabled = 0;
	ctrlr->num_io_queues = 1;
	ctrlr->num_cpus_per_ioq = mp_ncpus;
	ctrlr->rid = 0;
	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

	if (ctrlr->res == NULL) {
		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
		return (ENOMEM);
	}

	bus_setup_intr(ctrlr->dev, ctrlr->res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
	    ctrlr, &ctrlr->tag);

	if (ctrlr->tag == NULL) {
		nvme_printf(ctrlr, "unable to setup intx handler\n");
		return (ENOMEM);
	}

	return (0);
}

static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_pt_command *pt = arg;
	struct mtx *mtx = pt->driver_lock;

	bzero(&pt->cpl, sizeof(pt->cpl));
	pt->cpl.cdw0 = cpl->cdw0;
	pt->cpl.status = cpl->status;
	pt->cpl.status.p = 0;

	mtx_lock(mtx);
	pt->driver_lock = NULL;
	wakeup(pt);
	mtx_unlock(mtx);
}

int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
    struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
    int is_admin_cmd)
{
	struct nvme_request	*req;
	struct mtx		*mtx;
	struct buf		*buf = NULL;
	int			ret = 0;
	vm_offset_t		addr, end;

	if (pt->len > 0) {
		/*
		 * vmapbuf calls vm_fault_quick_hold_pages which only maps full
		 * pages.  Ensure this request has no more than MAXPHYS bytes when
		 * extended to full pages.
		 */
		addr = (vm_offset_t)pt->buf;
		end = round_page(addr + pt->len);
		addr = trunc_page(addr);
		if (end - addr > MAXPHYS)
			return EIO;

		if (pt->len > ctrlr->max_xfer_size) {
			nvme_printf(ctrlr, "pt->len (%d) "
			    "exceeds max_xfer_size (%d)\n", pt->len,
			    ctrlr->max_xfer_size);
			return EIO;
		}
		if (is_user_buffer) {
			/*
			 * Ensure the user buffer is wired for the duration of
			 * this passthrough command.
			 */
			PHOLD(curproc);
			buf = getpbuf(NULL);
			buf->b_data = pt->buf;
			buf->b_bufsize = pt->len;
			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
			if (vmapbuf(buf, 1) < 0) {
				ret = EFAULT;
				goto err;
			}
			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
			    nvme_pt_done, pt);
		} else
			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
			    nvme_pt_done, pt);
	} else
		req = nvme_allocate_request_null(nvme_pt_done, pt);

	req->cmd.opc = pt->cmd.opc;
	req->cmd.cdw10 = pt->cmd.cdw10;
	req->cmd.cdw11 = pt->cmd.cdw11;
	req->cmd.cdw12 = pt->cmd.cdw12;
	req->cmd.cdw13 = pt->cmd.cdw13;
	req->cmd.cdw14 = pt->cmd.cdw14;
	req->cmd.cdw15 = pt->cmd.cdw15;

	req->cmd.nsid = nsid;

	mtx = mtx_pool_find(mtxpool_sleep, pt);
	pt->driver_lock = mtx;

	if (is_admin_cmd)
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	else
		nvme_ctrlr_submit_io_request(ctrlr, req);

	mtx_lock(mtx);
	while (pt->driver_lock != NULL)
		mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
	mtx_unlock(mtx);

err:
	if (buf != NULL) {
		relpbuf(buf, NULL);
		PRELE(curproc);
	}

	return (ret);
}

static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct nvme_controller	*ctrlr;
	struct nvme_pt_command	*pt;

	ctrlr = cdev->si_drv1;

	switch (cmd) {
	case NVME_RESET_CONTROLLER:
		nvme_ctrlr_reset(ctrlr);
		break;
	case NVME_PASSTHROUGH_CMD:
		pt = (struct nvme_pt_command *)arg;
		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
	default:
		return (ENOTTY);
	}

	return (0);
}

static struct cdevsw nvme_ctrlr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_ioctl =	nvme_ctrlr_ioctl
};

static void
nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
{
	device_t	dev;
	int		per_cpu_io_queues;
	int		min_cpus_per_ioq;
	int		num_vectors_requested, num_vectors_allocated;
	int		num_vectors_available;

	dev = ctrlr->dev;
	min_cpus_per_ioq = 1;
	TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);

	if (min_cpus_per_ioq < 1) {
		min_cpus_per_ioq = 1;
	} else if (min_cpus_per_ioq > mp_ncpus) {
		min_cpus_per_ioq = mp_ncpus;
	}

	per_cpu_io_queues = 1;
	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);

	if (per_cpu_io_queues == 0) {
		min_cpus_per_ioq = mp_ncpus;
	}

	ctrlr->force_intx = 0;
	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

	/*
	 * FreeBSD currently cannot allocate more than about 190 vectors at
	 * boot, meaning that systems with high core count and many devices
	 * requesting per-CPU interrupt vectors will not get their full
	 * allotment.  So first, try to allocate as many as we may need to
	 * understand what is available, then immediately release them.
	 * Then figure out how many of those we will actually use, based on
	 * assigning an equal number of cores to each I/O queue.
	 */

	/* One vector per core for I/O queues, plus one vector for the admin queue. */
	num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
	if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
		num_vectors_available = 0;
	}
	pci_release_msi(dev);

	if (ctrlr->force_intx || num_vectors_available < 2) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	/*
	 * Do not use all vectors for I/O queues - one must be saved for the
	 * admin queue.
	 */
	ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
	    howmany(mp_ncpus, num_vectors_available - 1));

	ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
	num_vectors_requested = ctrlr->num_io_queues + 1;
	num_vectors_allocated = num_vectors_requested;

	/*
	 * Now just allocate the number of vectors we need.  This should
	 * succeed, since we previously called pci_alloc_msix()
	 * successfully returning at least this many vectors, but just to
	 * be safe, if something goes wrong just revert to INTx.
	 */
	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	if (num_vectors_allocated < num_vectors_requested) {
		pci_release_msi(dev);
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	ctrlr->msix_enabled = 1;
}

int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
	struct make_dev_args	md_args;
	union cap_lo_register	cap_lo;
	union cap_hi_register	cap_hi;
	int			status, timeout_period;

	ctrlr->dev = dev;

	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);

	status = nvme_ctrlr_allocate_bar(ctrlr);

	if (status != 0)
		return (status);

	/*
	 * Software emulators may set the doorbell stride to something
	 * other than zero, but this driver is not set up to handle that.
	 */
	cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
	if (cap_hi.bits.dstrd != 0)
		return (ENXIO);

	ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);

	/* Get ready timeout value from controller, in units of 500ms. */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;

	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);

	ctrlr->enable_aborts = 0;
	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);

	nvme_ctrlr_setup_interrupts(ctrlr);

	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
	if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
		return (ENXIO);

	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");

	ctrlr->is_resetting = 0;
	ctrlr->is_initialized = 0;
	ctrlr->notification_sent = 0;
	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
	STAILQ_INIT(&ctrlr->fail_req);
	ctrlr->is_failed = FALSE;

	make_dev_args_init(&md_args);
	md_args.mda_devsw = &nvme_ctrlr_cdevsw;
	md_args.mda_uid = UID_ROOT;
	md_args.mda_gid = GID_WHEEL;
	md_args.mda_mode = 0600;
	md_args.mda_unit = device_get_unit(dev);
	md_args.mda_si_drv1 = (void *)ctrlr;
	status = make_dev_s(&md_args, &ctrlr->cdev, "nvme%d",
	    device_get_unit(dev));
	if (status != 0)
		return (ENXIO);

	return (0);
}

void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
	int	i;

	if (ctrlr->resource == NULL)
		goto nores;

	nvme_notify_fail_consumers(ctrlr);

	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
		nvme_ns_destruct(&ctrlr->ns[i]);

	if (ctrlr->cdev)
		destroy_dev(ctrlr->cdev);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		nvme_ctrlr_destroy_qpair(ctrlr, &ctrlr->ioq[i]);
		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
	}
	free(ctrlr->ioq, M_NVME);

	nvme_admin_qpair_destroy(&ctrlr->adminq);

	/*
	 * Notify the controller of a shutdown, even though this is due to
	 * a driver unload, not a system shutdown (this path is not invoked
	 * during shutdown).  This ensures the controller receives a
	 * shutdown notification in case the system is shut down before
	 * reloading the driver.
	 */
	nvme_ctrlr_shutdown(ctrlr);

	nvme_ctrlr_disable(ctrlr);

	if (ctrlr->taskqueue)
		taskqueue_free(ctrlr->taskqueue);

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	if (ctrlr->msix_enabled)
		pci_release_msi(dev);

	if (ctrlr->bar4_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
	}

	bus_release_resource(dev, SYS_RES_MEMORY,
	    ctrlr->resource_id, ctrlr->resource);

nores:
	mtx_destroy(&ctrlr->lock);
}

void
nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
{
	union cc_register	cc;
	union csts_register	csts;
	int			ticks = 0;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	cc.bits.shn = NVME_SHN_NORMAL;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);
	while ((csts.bits.shst != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
		pause("nvme shn", 1);
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}
	if (csts.bits.shst != NVME_SHST_COMPLETE)
		nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
		    "of notification\n");
}

void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	nvme_qpair_submit_request(&ctrlr->adminq, req);
}

void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
	struct nvme_qpair	*qpair;

	qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
	nvme_qpair_submit_request(qpair, req);
}

device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{

	return (ctrlr->dev);
}

const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{

	return (&ctrlr->cdata);
}
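
Usage note (not part of the driver source): nvme_ctrlr_ioctl() above exposes NVME_RESET_CONTROLLER and NVME_PASSTHROUGH_CMD on the per-controller character device created in nvme_ctrlr_construct().  The sketch below shows one way a userland program might issue an admin Identify Controller command through that interface, roughly the way nvmecontrol(8) does.  The device path /dev/nvme0, the userland header location, and the NVME_OPC_IDENTIFY/CNS=1 encoding are assumptions here; the nvme_pt_command fields (cmd.opc, cmd.cdw10, buf, len, is_read) are the ones nvme_ctrlr_passthrough_cmd() consumes.

/* Hedged example: admin passthrough from userland, assuming FreeBSD 11-era headers. */
#include <sys/param.h>
#include <sys/ioctl.h>
#include <dev/nvme/nvme.h>	/* assumed userland include path for nvme_pt_command */

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct nvme_pt_command		pt;
	struct nvme_controller_data	cdata;
	int				fd;

	fd = open("/dev/nvme0", O_RDWR);	/* assumed controller device node */
	if (fd < 0)
		err(1, "open");

	memset(&pt, 0, sizeof(pt));
	pt.cmd.opc = NVME_OPC_IDENTIFY;
	pt.cmd.cdw10 = 1;			/* CNS=1: identify controller */
	pt.buf = &cdata;
	pt.len = sizeof(cdata);
	pt.is_read = 1;				/* data flows controller -> host */

	/*
	 * The driver wires the buffer, builds a request, submits it to the
	 * admin queue, and sleeps in nvme_ctrlr_passthrough_cmd() until
	 * nvme_pt_done() copies the completion back and wakes us up.
	 */
	if (ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) < 0)
		err(1, "ioctl");

	printf("model: %.40s\n", (const char *)cdata.mn);
	return (0);
}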