nvme_ctrlr.c revision 293667
/*-
 * Copyright (C) 2012-2015 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/nvme/nvme_ctrlr.c 293667 2016-01-11 17:26:06Z jimharris $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/uio.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
						struct nvme_async_event_request *aer);

static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

	ctrlr->resource_id = PCIR_BAR(0);

	ctrlr->resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE);

	if (ctrlr->resource == NULL) {
		nvme_printf(ctrlr, "unable to allocate pci resource\n");
		return (ENOMEM);
	}

	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed behind
	 * BAR 4/5, separate from the control/doorbell registers.  Always
	 * try to map this bar, because it must be mapped prior to calling
	 * pci_alloc_msix().  If the table isn't behind BAR 4/5,
	 * bus_alloc_resource() will just return NULL which is OK.
	 */
	ctrlr->bar4_resource_id = PCIR_BAR(4);
	ctrlr->bar4_resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->bar4_resource_id, 0, ~0, 1, RF_ACTIVE);

	return (0);
}

static void
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
	struct nvme_qpair	*qpair;
	uint32_t		num_entries;

	qpair = &ctrlr->adminq;

	num_entries = NVME_ADMIN_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
	/*
	 * If admin_entries was overridden to an invalid value, revert it
	 * back to our default value.
	 */
	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
		    "specified\n", num_entries);
		num_entries = NVME_ADMIN_ENTRIES;
	}

	/*
	 * The admin queue's max xfer size is treated differently than the
	 * max I/O xfer size.  16KB is sufficient here - maybe even less?
	 */
	nvme_qpair_construct(qpair,
	    0,			/* qpair ID */
	    0,			/* vector */
	    num_entries,
	    NVME_ADMIN_TRACKERS,
	    ctrlr);
}

static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_qpair	*qpair;
	union cap_lo_register	cap_lo;
	int			i, num_entries, num_trackers;

	num_entries = NVME_IO_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);

	/*
	 * NVMe spec sets a hard limit of 64K max entries, but
	 * devices may specify a smaller limit, so we need to check
	 * the MQES field in the capabilities register.
	 */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	num_entries = min(num_entries, cap_lo.bits.mqes+1);

	num_trackers = NVME_IO_TRACKERS;
	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
	/*
	 * No need to have more trackers than entries in the submit queue.
	 * Note also that for a queue size of N, we can only have (N-1)
	 * commands outstanding, hence the "-1" here.
	 */
	num_trackers = min(num_trackers, (num_entries-1));

	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
	    M_NVME, M_ZERO | M_WAITOK);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		/*
		 * Admin queue has ID=0. IO queues start at ID=1 -
		 * hence the 'i+1' here.
		 *
		 * For I/O queues, use the controller-wide max_xfer_size
		 * calculated in nvme_attach().
		 */
		nvme_qpair_construct(qpair,
		    i+1,				/* qpair ID */
		    ctrlr->msix_enabled ? i+1 : 0,	/* vector */
		    num_entries,
		    num_trackers,
		    ctrlr);

		if (ctrlr->per_cpu_io_queues)
			bus_bind_intr(ctrlr->dev, qpair->res, i);
	}

	return (0);
}

static void
nvme_ctrlr_fail(struct nvme_controller *ctrlr)
{
	int i;

	ctrlr->is_failed = TRUE;
	nvme_qpair_fail(&ctrlr->adminq);
	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_fail(&ctrlr->ioq[i]);
	nvme_notify_fail_consumers(ctrlr);
}

void
nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	mtx_lock(&ctrlr->lock);
	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
	mtx_unlock(&ctrlr->lock);
	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
}

static void
nvme_ctrlr_fail_req_task(void *arg, int pending)
{
	struct nvme_controller	*ctrlr = arg;
	struct nvme_request	*req;

	mtx_lock(&ctrlr->lock);
	while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
		req = STAILQ_FIRST(&ctrlr->fail_req);
		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
		nvme_qpair_manual_complete_request(req->qpair, req,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
	}
	mtx_unlock(&ctrlr->lock);
}

static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
{
	int ms_waited;
	union cc_register cc;
	union csts_register csts;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en != desired_val) {
		nvme_printf(ctrlr, "%s called with desired_val = %d "
		    "but cc.en = %d\n", __func__, desired_val, cc.bits.en);
		return (ENXIO);
	}

	ms_waited = 0;

	while (csts.bits.rdy != desired_val) {
		DELAY(1000);
		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
			nvme_printf(ctrlr, "controller ready did not become %d "
			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
			return (ENXIO);
		}
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}

	return (0);
}

static void
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en == 1 && csts.bits.rdy == 0)
		nvme_ctrlr_wait_for_ready(ctrlr, 1);

	cc.bits.en = 0;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	DELAY(5000);
	nvme_ctrlr_wait_for_ready(ctrlr, 0);
}

static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
	union cc_register	cc;
	union csts_register	csts;
	union aqa_register	aqa;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en == 1) {
		if (csts.bits.rdy == 1)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
	}

	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
	DELAY(5000);
	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
	DELAY(5000);

	aqa.raw = 0;
	/* acqs and asqs are 0-based. */
	aqa.bits.acqs = ctrlr->adminq.num_entries-1;
	aqa.bits.asqs = ctrlr->adminq.num_entries-1;
	nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
	DELAY(5000);

	cc.bits.en = 1;
	cc.bits.css = 0;
	cc.bits.ams = 0;
	cc.bits.shn = 0;
	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

	/* This evaluates to 0, which is according to spec. */
	cc.bits.mps = (PAGE_SIZE >> 13);

	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	DELAY(5000);

	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}

int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
{
	int i;

	nvme_admin_qpair_disable(&ctrlr->adminq);
	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_disable(&ctrlr->ioq[i]);

	DELAY(100*1000);

	nvme_ctrlr_disable(ctrlr);
	return (nvme_ctrlr_enable(ctrlr));
}

void
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{
	int cmpset;

	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

	if (cmpset == 0 || ctrlr->is_failed)
		/*
		 * Controller is already resetting or has failed.  Return
		 * immediately since there is no need to kick off another
		 * reset in these cases.
		 */
		return;

	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}

static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;

	status.done = FALSE;
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
		return (ENXIO);
	}

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 * controller supports.
	 */
	if (ctrlr->cdata.mdts > 0)
		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));

	return (0);
}

static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	int					cq_allocated, i, sq_allocated;

	status.done = FALSE;
	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_set_num_queues failed!\n");
		return (ENXIO);
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (status.cpl.cdw0 >> 16) + 1;

	/*
	 * Check that the controller was able to allocate the number of
	 * queues we requested.  If not, revert to one IO queue pair.
	 */
	if (sq_allocated < ctrlr->num_io_queues ||
	    cq_allocated < ctrlr->num_io_queues) {

		/*
		 * Destroy extra IO queue pairs that were created at
		 * controller construction time but are no longer
		 * needed.  This will only happen when a controller
		 * supports fewer queues than MSI-X vectors.  This
		 * is not the normal case, but does occur with the
		 * Chatham prototype board.
		 */
		for (i = 1; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_destroy(&ctrlr->ioq[i]);

		ctrlr->num_io_queues = 1;
		ctrlr->per_cpu_io_queues = 0;
	}

	return (0);
}

static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	struct nvme_qpair			*qpair;
	int					i;

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = FALSE;
		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
		    nvme_completion_poll_cb, &status);
		while (status.done == FALSE)
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
			return (ENXIO);
		}

		status.done = FALSE;
		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		while (status.done == FALSE)
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
			return (ENXIO);
		}
	}

	return (0);
}

static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
	struct nvme_namespace	*ns;
	int			i, status;

	for (i = 0; i < ctrlr->cdata.nn; i++) {
		ns = &ctrlr->ns[i];
		status = nvme_ns_construct(ns, i+1, ctrlr);
		if (status != 0)
			return (status);
	}

	return (0);
}

static boolean_t
is_log_page_id_valid(uint8_t page_id)
{

	switch (page_id) {
	case NVME_LOG_ERROR:
	case NVME_LOG_HEALTH_INFORMATION:
	case NVME_LOG_FIRMWARE_SLOT:
		return (TRUE);
	}

	return (FALSE);
}

static uint32_t
nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
{
	uint32_t	log_page_size;

	switch (page_id) {
	case NVME_LOG_ERROR:
		log_page_size = min(
		    sizeof(struct nvme_error_information_entry) *
		    ctrlr->cdata.elpe,
		    NVME_MAX_AER_LOG_SIZE);
		break;
	case NVME_LOG_HEALTH_INFORMATION:
		log_page_size = sizeof(struct nvme_health_information_page);
		break;
	case NVME_LOG_FIRMWARE_SLOT:
		log_page_size = sizeof(struct nvme_firmware_page);
		break;
	default:
		log_page_size = 0;
		break;
	}

	return (log_page_size);
}

static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
    union nvme_critical_warning_state state)
{

	if (state.bits.available_spare == 1)
		nvme_printf(ctrlr, "available spare space below threshold\n");

	if (state.bits.temperature == 1)
		nvme_printf(ctrlr, "temperature above threshold\n");

	if (state.bits.device_reliability == 1)
		nvme_printf(ctrlr, "device reliability degraded\n");

	if (state.bits.read_only == 1)
		nvme_printf(ctrlr, "media placed in read only mode\n");

	if (state.bits.volatile_memory_backup == 1)
		nvme_printf(ctrlr, "volatile memory backup device failed\n");

	if (state.bits.reserved != 0)
		nvme_printf(ctrlr,
		    "unknown critical warning(s): state = 0x%02x\n", state.raw);
}

static void
nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request		*aer = arg;
	struct nvme_health_information_page	*health_info;

	/*
	 * If the log page fetch for some reason completed with an error,
	 * don't pass log page data to the consumers.  In practice, this case
	 * should never happen.
	 */
	if (nvme_completion_is_error(cpl))
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, NULL, 0);
	else {
		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
			health_info = (struct nvme_health_information_page *)
			    aer->log_page_buffer;
			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
			    health_info->critical_warning);
			/*
			 * Critical warnings reported through the
			 * SMART/health log page are persistent, so
			 * clear the associated bits in the async event
			 * config so that we do not receive repeated
			 * notifications for the same event.
			 */
			aer->ctrlr->async_event_config.raw &=
			    ~health_info->critical_warning.raw;
			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
			    aer->ctrlr->async_event_config, NULL, NULL);
		}

		/*
		 * Pass the cpl data from the original async event completion,
		 * not the log page fetch.
		 */
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
	}

	/*
	 * Repost another asynchronous event request to replace the one
	 * that just completed.
	 */
	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
}

static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request	*aer = arg;

	if (nvme_completion_is_error(cpl)) {
		/*
		 * Do not retry failed async event requests.  This avoids
		 * infinite loops where a new async event request is submitted
		 * to replace the one just failed, only to fail again and
		 * perpetuate the loop.
		 */
		return;
	}

	/* Associated log page is in bits 23:16 of completion entry dw0. */
	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;

	nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
	    aer->log_page_id);

	if (is_log_page_id_valid(aer->log_page_id)) {
		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
		    aer->log_page_id);
		memcpy(&aer->cpl, cpl, sizeof(*cpl));
		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
		    aer);
		/* Wait to notify consumers until after log page is fetched. */
	} else {
		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
		    NULL, 0);

		/*
		 * Repost another asynchronous event request to replace the one
		 * that just completed.
		 */
		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
	}
}

static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
	struct nvme_request *req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
	aer->req = req;

	/*
	 * Disable timeout here, since asynchronous event requests should by
	 * nature never be timed out.
	 */
	req->timeout = FALSE;
	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	struct nvme_async_event_request		*aer;
	uint32_t				i;

	ctrlr->async_event_config.raw = 0xFF;
	ctrlr->async_event_config.bits.reserved = 0;

	status.done = FALSE;
	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
	    0, NULL, 0, nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl) ||
	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
		nvme_printf(ctrlr, "temperature threshold not supported\n");
		ctrlr->async_event_config.bits.temperature = 0;
	}

	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
	    ctrlr->async_event_config, NULL, NULL);

	/* aerl is a zero-based value, so we need to add 1 here. */
	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));

	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
	}
}

static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

	ctrlr->int_coal_time = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
	    &ctrlr->int_coal_time);

	ctrlr->int_coal_threshold = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
	    &ctrlr->int_coal_threshold);

	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
	    ctrlr->int_coal_threshold, NULL, NULL);
}

static void
nvme_ctrlr_start(void *ctrlr_arg)
{
	struct nvme_controller *ctrlr = ctrlr_arg;
	int i;

	nvme_qpair_reset(&ctrlr->adminq);
	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_reset(&ctrlr->ioq[i]);

	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_identify(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	nvme_ctrlr_configure_aer(ctrlr);
	nvme_ctrlr_configure_int_coalescing(ctrlr);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_enable(&ctrlr->ioq[i]);
}

void
nvme_ctrlr_start_config_hook(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_ctrlr_start(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	ctrlr->is_initialized = 1;
	nvme_notify_new_controller(ctrlr);
}

static void
nvme_ctrlr_reset_task(void *arg, int pending)
{
	struct nvme_controller	*ctrlr = arg;
	int			status;

	nvme_printf(ctrlr, "resetting controller\n");
	status = nvme_ctrlr_hw_reset(ctrlr);
	/*
	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
	 * handlers on this CPU that were blocked on a qpair lock.  We want
	 * all nvme interrupts completed before proceeding with restarting the
	 * controller.
	 *
	 * XXX - any way to guarantee the interrupt handlers have quiesced?
	 */
	pause("nvmereset", hz / 10);
	if (status == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
}

static void
nvme_ctrlr_intx_handler(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_mmio_write_4(ctrlr, intms, 1);

	nvme_qpair_process_completions(&ctrlr->adminq);

	if (ctrlr->ioq[0].cpl)
		nvme_qpair_process_completions(&ctrlr->ioq[0]);

	nvme_mmio_write_4(ctrlr, intmc, 1);
}

static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

	ctrlr->num_io_queues = 1;
	ctrlr->per_cpu_io_queues = 0;
	ctrlr->rid = 0;
	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

	if (ctrlr->res == NULL) {
		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
		return (ENOMEM);
	}

	bus_setup_intr(ctrlr->dev, ctrlr->res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
	    ctrlr, &ctrlr->tag);

	if (ctrlr->tag == NULL) {
		nvme_printf(ctrlr, "unable to setup intx handler\n");
		return (ENOMEM);
	}

	return (0);
}

static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_pt_command *pt = arg;

	bzero(&pt->cpl, sizeof(pt->cpl));
	pt->cpl.cdw0 = cpl->cdw0;
	pt->cpl.status = cpl->status;
	pt->cpl.status.p = 0;

	mtx_lock(pt->driver_lock);
	wakeup(pt);
	mtx_unlock(pt->driver_lock);
}

int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
    struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
    int is_admin_cmd)
{
	struct nvme_request	*req;
	struct mtx		*mtx;
	struct buf		*buf = NULL;
	int			ret = 0;

	if (pt->len > 0) {
		if (pt->len > ctrlr->max_xfer_size) {
			nvme_printf(ctrlr, "pt->len (%d) "
			    "exceeds max_xfer_size (%d)\n", pt->len,
			    ctrlr->max_xfer_size);
			return EIO;
		}
		if (is_user_buffer) {
			/*
			 * Ensure the user buffer is wired for the duration of
			 * this passthrough command.
			 */
			PHOLD(curproc);
			buf = getpbuf(NULL);
			buf->b_saveaddr = buf->b_data;
			buf->b_data = pt->buf;
			buf->b_bufsize = pt->len;
			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
			if (vmapbuf(buf, 1) < 0) {
#else
			if (vmapbuf(buf) < 0) {
#endif
				ret = EFAULT;
				goto err;
			}
			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
			    nvme_pt_done, pt);
		} else
			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
			    nvme_pt_done, pt);
	} else
		req = nvme_allocate_request_null(nvme_pt_done, pt);

	req->cmd.opc	= pt->cmd.opc;
	req->cmd.cdw10	= pt->cmd.cdw10;
	req->cmd.cdw11	= pt->cmd.cdw11;
	req->cmd.cdw12	= pt->cmd.cdw12;
	req->cmd.cdw13	= pt->cmd.cdw13;
	req->cmd.cdw14	= pt->cmd.cdw14;
	req->cmd.cdw15	= pt->cmd.cdw15;

	req->cmd.nsid = nsid;

	if (is_admin_cmd)
		mtx = &ctrlr->lock;
	else
		mtx = &ctrlr->ns[nsid-1].lock;

	mtx_lock(mtx);
	pt->driver_lock = mtx;

	if (is_admin_cmd)
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	else
		nvme_ctrlr_submit_io_request(ctrlr, req);

	mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
	mtx_unlock(mtx);

	pt->driver_lock = NULL;

err:
	if (buf != NULL) {
		relpbuf(buf, NULL);
		PRELE(curproc);
	}

	return (ret);
}

static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct nvme_controller	*ctrlr;
	struct nvme_pt_command	*pt;

	ctrlr = cdev->si_drv1;

	switch (cmd) {
	case NVME_RESET_CONTROLLER:
		nvme_ctrlr_reset(ctrlr);
		break;
	case NVME_PASSTHROUGH_CMD:
		pt = (struct nvme_pt_command *)arg;
		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
	default:
		return (ENOTTY);
	}

	return (0);
}

static struct cdevsw nvme_ctrlr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_ioctl =	nvme_ctrlr_ioctl
};

int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
	union cap_lo_register	cap_lo;
	union cap_hi_register	cap_hi;
	int			i, per_cpu_io_queues, rid;
	int			num_vectors_requested, num_vectors_allocated;
	int			status, timeout_period;

	ctrlr->dev = dev;

	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);

	status = nvme_ctrlr_allocate_bar(ctrlr);

	if (status != 0)
		return (status);

	/*
	 * Software emulators may set the doorbell stride to something
	 * other than zero, but this driver is not set up to handle that.
	 */
	cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
	if (cap_hi.bits.dstrd != 0)
		return (ENXIO);

	ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);

	/* Get ready timeout value from controller, in units of 500ms. */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;

	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);

	per_cpu_io_queues = 1;
	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
	ctrlr->per_cpu_io_queues = per_cpu_io_queues ? TRUE : FALSE;

	if (ctrlr->per_cpu_io_queues)
		ctrlr->num_io_queues = mp_ncpus;
	else
		ctrlr->num_io_queues = 1;

	ctrlr->force_intx = 0;
	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

	ctrlr->enable_aborts = 0;
	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);

	ctrlr->msix_enabled = 1;

	if (ctrlr->force_intx) {
		ctrlr->msix_enabled = 0;
		goto intx;
	}

	/* One vector per IO queue, plus one vector for admin queue. */
	num_vectors_requested = ctrlr->num_io_queues + 1;

	/*
	 * If we cannot even allocate 2 vectors (one for admin, one for
	 * I/O), then revert to INTx.
	 */
	if (pci_msix_count(dev) < 2) {
		ctrlr->msix_enabled = 0;
		goto intx;
	}

	if (pci_msix_count(dev) < num_vectors_requested) {
		ctrlr->per_cpu_io_queues = FALSE;
		ctrlr->num_io_queues = 1;
		num_vectors_requested = 2; /* one for admin, one for I/O */
	}

	num_vectors_allocated = num_vectors_requested;
	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
		ctrlr->msix_enabled = 0;
		goto intx;
	}

	if (num_vectors_allocated < num_vectors_requested) {
		if (num_vectors_allocated < 2) {
			pci_release_msi(dev);
			ctrlr->msix_enabled = 0;
			goto intx;
		}

		ctrlr->per_cpu_io_queues = FALSE;
		ctrlr->num_io_queues = 1;
		/*
		 * Release whatever vectors were allocated, and just
		 * reallocate the two needed for the admin and single
		 * I/O qpair.
		 */
		num_vectors_allocated = 2;
		pci_release_msi(dev);
		if (pci_alloc_msix(dev, &num_vectors_allocated) != 0)
			panic("could not reallocate any vectors\n");
		if (num_vectors_allocated != 2)
			panic("could not reallocate 2 vectors\n");
	}

	/*
	 * On earlier FreeBSD releases, there are reports that
	 * pci_alloc_msix() can return successfully with all vectors
	 * requested, but a subsequent bus_alloc_resource_any()
	 * for one of those vectors fails.  This issue occurs more
	 * readily with multiple devices using per-CPU vectors.
	 * To workaround this issue, try to allocate the resources now,
	 * and fall back to INTx if we cannot allocate all of them.
	 * This issue cannot be reproduced on more recent versions of
	 * FreeBSD which have increased the maximum number of MSI-X
	 * vectors, but adding the workaround makes it easier for
	 * vendors wishing to import this driver into kernels based on
	 * older versions of FreeBSD.
	 */
	for (i = 0; i < num_vectors_allocated; i++) {
		rid = i + 1;
		ctrlr->msi_res[i] = bus_alloc_resource_any(ctrlr->dev,
		    SYS_RES_IRQ, &rid, RF_ACTIVE);

		if (ctrlr->msi_res[i] == NULL) {
			ctrlr->msix_enabled = 0;
			while (i > 0) {
				i--;
				bus_release_resource(ctrlr->dev,
				    SYS_RES_IRQ,
				    rman_get_rid(ctrlr->msi_res[i]),
				    ctrlr->msi_res[i]);
			}
			pci_release_msi(dev);
			nvme_printf(ctrlr, "could not obtain all MSI-X "
			    "resources, reverting to intx\n");
			break;
		}
	}

intx:

	if (!ctrlr->msix_enabled)
		nvme_ctrlr_configure_intx(ctrlr);

	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
	nvme_ctrlr_construct_admin_qpair(ctrlr);
	status = nvme_ctrlr_construct_io_qpairs(ctrlr);

	if (status != 0)
		return (status);

	ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_WHEEL, 0600, "nvme%d", device_get_unit(dev));

	if (ctrlr->cdev == NULL)
		return (ENXIO);

	ctrlr->cdev->si_drv1 = (void *)ctrlr;

	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");

	ctrlr->is_resetting = 0;
	ctrlr->is_initialized = 0;
	ctrlr->notification_sent = 0;
	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);

	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
	STAILQ_INIT(&ctrlr->fail_req);
	ctrlr->is_failed = FALSE;

	return (0);
}

void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
	int	i;

	/*
	 * Notify the controller of a shutdown, even though this is due to
	 * a driver unload, not a system shutdown (this path is not invoked
	 * during shutdown).  This ensures the controller receives a
	 * shutdown notification in case the system is shutdown before
	 * reloading the driver.
	 */
	nvme_ctrlr_shutdown(ctrlr);

	nvme_ctrlr_disable(ctrlr);
	taskqueue_free(ctrlr->taskqueue);

	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
		nvme_ns_destruct(&ctrlr->ns[i]);

	if (ctrlr->cdev)
		destroy_dev(ctrlr->cdev);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
	}

	free(ctrlr->ioq, M_NVME);

	nvme_admin_qpair_destroy(&ctrlr->adminq);

	if (ctrlr->resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->resource_id, ctrlr->resource);
	}

	if (ctrlr->bar4_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
	}

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	if (ctrlr->msix_enabled)
		pci_release_msi(dev);
}

void
nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
{
	union cc_register	cc;
	union csts_register	csts;
	int			ticks = 0;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	cc.bits.shn = NVME_SHN_NORMAL;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);
	while ((csts.bits.shst != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
		pause("nvme shn", 1);
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}
	if (csts.bits.shst != NVME_SHST_COMPLETE)
		nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
		    "of notification\n");
}

void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	nvme_qpair_submit_request(&ctrlr->adminq, req);
}

void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
	struct nvme_qpair	*qpair;

	if (ctrlr->per_cpu_io_queues)
		qpair = &ctrlr->ioq[curcpu];
	else
		qpair = &ctrlr->ioq[0];

	nvme_qpair_submit_request(qpair, req);
}

device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{

	return (ctrlr->dev);
}

const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{

	return (&ctrlr->cdata);
}