nvme_ctrlr.c revision 293668
/*-
 * Copyright (C) 2012-2015 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/nvme/nvme_ctrlr.c 293668 2016-01-11 17:27:20Z jimharris $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/uio.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer);

static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

	ctrlr->resource_id = PCIR_BAR(0);

	ctrlr->resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE);

	if (ctrlr->resource == NULL) {
		nvme_printf(ctrlr, "unable to allocate pci resource\n");
		return (ENOMEM);
	}

	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed behind
	 * BAR 4/5, separate from the control/doorbell registers.  Always
	 * try to map this bar, because it must be mapped prior to calling
	 * pci_alloc_msix().  If the table isn't behind BAR 4/5,
	 * bus_alloc_resource() will just return NULL which is OK.
	 */
	ctrlr->bar4_resource_id = PCIR_BAR(4);
	ctrlr->bar4_resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->bar4_resource_id, 0, ~0, 1, RF_ACTIVE);

	return (0);
}

static void
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
	struct nvme_qpair *qpair;
	uint32_t num_entries;

	qpair = &ctrlr->adminq;

	num_entries = NVME_ADMIN_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
	/*
	 * If admin_entries was overridden to an invalid value, revert it
	 * back to our default value.
	 */
	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
		    "specified\n", num_entries);
		num_entries = NVME_ADMIN_ENTRIES;
	}

	/*
	 * The admin queue's max xfer size is treated differently than the
	 * max I/O xfer size.  16KB is sufficient here - maybe even less?
	 */
	nvme_qpair_construct(qpair,
	    0, /* qpair ID */
	    0, /* vector */
	    num_entries,
	    NVME_ADMIN_TRACKERS,
	    ctrlr);
}

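/*
 * Construct the I/O queue pairs.  Queue depth comes from the
 * hw.nvme.io_entries tunable, clamped to the controller's CAP.MQES limit;
 * tracker counts come from hw.nvme.io_trackers, clamped to the driver's
 * min/max and to (queue depth - 1).
 */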
static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_qpair *qpair;
	union cap_lo_register cap_lo;
	int i, num_entries, num_trackers;

	num_entries = NVME_IO_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);

	/*
	 * NVMe spec sets a hard limit of 64K max entries, but
	 * devices may specify a smaller limit, so we need to check
	 * the MQES field in the capabilities register.
	 */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	num_entries = min(num_entries, cap_lo.bits.mqes+1);

	num_trackers = NVME_IO_TRACKERS;
	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
	/*
	 * No need to have more trackers than entries in the submit queue.
	 * Note also that for a queue size of N, we can only have (N-1)
	 * commands outstanding, hence the "-1" here.
	 */
	num_trackers = min(num_trackers, (num_entries-1));

	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
	    M_NVME, M_ZERO | M_WAITOK);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		/*
		 * Admin queue has ID=0. IO queues start at ID=1 -
		 * hence the 'i+1' here.
		 *
		 * For I/O queues, use the controller-wide max_xfer_size
		 * calculated in nvme_attach().
		 */
		nvme_qpair_construct(qpair,
		    i+1, /* qpair ID */
		    ctrlr->msix_enabled ? i+1 : 0, /* vector */
		    num_entries,
		    num_trackers,
		    ctrlr);

		if (ctrlr->num_io_queues > 1)
			bus_bind_intr(ctrlr->dev, qpair->res, i);
	}

	return (0);
}

static void
nvme_ctrlr_fail(struct nvme_controller *ctrlr)
{
	int i;

	ctrlr->is_failed = TRUE;
	nvme_qpair_fail(&ctrlr->adminq);
	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_fail(&ctrlr->ioq[i]);
	nvme_notify_fail_consumers(ctrlr);
}

void
nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	mtx_lock(&ctrlr->lock);
	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
	mtx_unlock(&ctrlr->lock);
	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
}

static void
nvme_ctrlr_fail_req_task(void *arg, int pending)
{
	struct nvme_controller *ctrlr = arg;
	struct nvme_request *req;

	mtx_lock(&ctrlr->lock);
	while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
		req = STAILQ_FIRST(&ctrlr->fail_req);
		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
		nvme_qpair_manual_complete_request(req->qpair, req,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
	}
	mtx_unlock(&ctrlr->lock);
}

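/*
 * Wait for CSTS.RDY to reach desired_val after CC.EN has been set to the
 * same value, giving up after the ready timeout advertised by the
 * controller (CAP.TO, stored in ready_timeout_in_ms).
 */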
static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
{
	int ms_waited;
	union cc_register cc;
	union csts_register csts;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en != desired_val) {
		nvme_printf(ctrlr, "%s called with desired_val = %d "
		    "but cc.en = %d\n", __func__, desired_val, cc.bits.en);
		return (ENXIO);
	}

	ms_waited = 0;

	while (csts.bits.rdy != desired_val) {
		DELAY(1000);
		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
			nvme_printf(ctrlr, "controller ready did not become %d "
			    "within %d ms\n", desired_val,
			    ctrlr->ready_timeout_in_ms);
			return (ENXIO);
		}
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}

	return (0);
}

static void
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en == 1 && csts.bits.rdy == 0)
		nvme_ctrlr_wait_for_ready(ctrlr, 1);

	cc.bits.en = 0;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	DELAY(5000);
	nvme_ctrlr_wait_for_ready(ctrlr, 0);
}

static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;
	union aqa_register aqa;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en == 1) {
		if (csts.bits.rdy == 1)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
	}

	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
	DELAY(5000);
	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
	DELAY(5000);

	aqa.raw = 0;
	/* acqs and asqs are 0-based. */
	aqa.bits.acqs = ctrlr->adminq.num_entries-1;
	aqa.bits.asqs = ctrlr->adminq.num_entries-1;
	nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
	DELAY(5000);

	cc.bits.en = 1;
	cc.bits.css = 0;
	cc.bits.ams = 0;
	cc.bits.shn = 0;
	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

	/* This evaluates to 0, which is according to spec. */
	cc.bits.mps = (PAGE_SIZE >> 13);

	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	DELAY(5000);

	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}

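/*
 * Perform a controller-level reset: quiesce the admin and I/O queue pairs,
 * then clear and re-set CC.EN, re-programming the admin queue registers
 * along the way in nvme_ctrlr_enable().
 */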
int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
{
	int i;

	nvme_admin_qpair_disable(&ctrlr->adminq);
	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_disable(&ctrlr->ioq[i]);

	DELAY(100*1000);

	nvme_ctrlr_disable(ctrlr);
	return (nvme_ctrlr_enable(ctrlr));
}

void
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{
	int cmpset;

	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

	if (cmpset == 0 || ctrlr->is_failed)
		/*
		 * Controller is already resetting or has failed.  Return
		 * immediately since there is no need to kick off another
		 * reset in these cases.
		 */
		return;

	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}

static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;

	status.done = FALSE;
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
		return (ENXIO);
	}

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 * controller supports.
	 */
	if (ctrlr->cdata.mdts > 0)
		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));

	return (0);
}

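/*
 * Negotiate the number of I/O queue pairs with the controller via the
 * Set Features / Number of Queues command.  The allocated submission and
 * completion queue counts are returned (0-based) in cdw0 of the completion;
 * if the controller granted fewer queues than requested, fall back to a
 * single I/O queue pair.
 */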
static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	int cq_allocated, i, sq_allocated;

	status.done = FALSE;
	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_set_num_queues failed!\n");
		return (ENXIO);
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (status.cpl.cdw0 >> 16) + 1;

	/*
	 * Check that the controller was able to allocate the number of
	 * queues we requested.  If not, revert to one IO queue pair.
	 */
	if (sq_allocated < ctrlr->num_io_queues ||
	    cq_allocated < ctrlr->num_io_queues) {

		/*
		 * Destroy extra IO queue pairs that were created at
		 * controller construction time but are no longer
		 * needed.  This will only happen when a controller
		 * supports fewer queues than MSI-X vectors.  This
		 * is not the normal case, but does occur with the
		 * Chatham prototype board.
		 */
		for (i = 1; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_destroy(&ctrlr->ioq[i]);

		ctrlr->num_io_queues = 1;
	}

	return (0);
}

static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	struct nvme_qpair *qpair;
	int i;

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = FALSE;
		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
		    nvme_completion_poll_cb, &status);
		while (status.done == FALSE)
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
			return (ENXIO);
		}

		status.done = FALSE;
		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		while (status.done == FALSE)
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
			return (ENXIO);
		}
	}

	return (0);
}

static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
	struct nvme_namespace *ns;
	int i, status;

	for (i = 0; i < ctrlr->cdata.nn; i++) {
		ns = &ctrlr->ns[i];
		status = nvme_ns_construct(ns, i+1, ctrlr);
		if (status != 0)
			return (status);
	}

	return (0);
}

static boolean_t
is_log_page_id_valid(uint8_t page_id)
{

	switch (page_id) {
	case NVME_LOG_ERROR:
	case NVME_LOG_HEALTH_INFORMATION:
	case NVME_LOG_FIRMWARE_SLOT:
		return (TRUE);
	}

	return (FALSE);
}

static uint32_t
nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
{
	uint32_t log_page_size;

	switch (page_id) {
	case NVME_LOG_ERROR:
		log_page_size = min(
		    sizeof(struct nvme_error_information_entry) *
		    ctrlr->cdata.elpe,
		    NVME_MAX_AER_LOG_SIZE);
		break;
	case NVME_LOG_HEALTH_INFORMATION:
		log_page_size = sizeof(struct nvme_health_information_page);
		break;
	case NVME_LOG_FIRMWARE_SLOT:
		log_page_size = sizeof(struct nvme_firmware_page);
		break;
	default:
		log_page_size = 0;
		break;
	}

	return (log_page_size);
}

static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
    union nvme_critical_warning_state state)
{

	if (state.bits.available_spare == 1)
		nvme_printf(ctrlr, "available spare space below threshold\n");

	if (state.bits.temperature == 1)
		nvme_printf(ctrlr, "temperature above threshold\n");

	if (state.bits.device_reliability == 1)
		nvme_printf(ctrlr, "device reliability degraded\n");

	if (state.bits.read_only == 1)
		nvme_printf(ctrlr, "media placed in read only mode\n");

	if (state.bits.volatile_memory_backup == 1)
		nvme_printf(ctrlr, "volatile memory backup device failed\n");

	if (state.bits.reserved != 0)
		nvme_printf(ctrlr,
		    "unknown critical warning(s): state = 0x%02x\n", state.raw);
}

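/*
 * Completion callback for the log page that is fetched in response to an
 * asynchronous event notification.  Forwards the log page (or, on error,
 * just the original AER completion) to registered consumers and reposts
 * the asynchronous event request.
 */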
static void
nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request *aer = arg;
	struct nvme_health_information_page *health_info;

	/*
	 * If the log page fetch for some reason completed with an error,
	 * don't pass log page data to the consumers.  In practice, this case
	 * should never happen.
	 */
	if (nvme_completion_is_error(cpl))
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, NULL, 0);
	else {
		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
			health_info = (struct nvme_health_information_page *)
			    aer->log_page_buffer;
			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
			    health_info->critical_warning);
			/*
			 * Critical warnings reported through the
			 * SMART/health log page are persistent, so
			 * clear the associated bits in the async event
			 * config so that we do not receive repeated
			 * notifications for the same event.
			 */
			aer->ctrlr->async_event_config.raw &=
			    ~health_info->critical_warning.raw;
			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
			    aer->ctrlr->async_event_config, NULL, NULL);
		}

		/*
		 * Pass the cpl data from the original async event completion,
		 * not the log page fetch.
		 */
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
	}

	/*
	 * Repost another asynchronous event request to replace the one
	 * that just completed.
	 */
	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
}

static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request *aer = arg;

	if (nvme_completion_is_error(cpl)) {
		/*
		 * Do not retry failed async event requests.  This avoids
		 * infinite loops where a new async event request is submitted
		 * to replace the one just failed, only to fail again and
		 * perpetuate the loop.
		 */
		return;
	}

	/* Associated log page is in bits 23:16 of completion entry dw0. */
	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;

	nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
	    aer->log_page_id);

	if (is_log_page_id_valid(aer->log_page_id)) {
		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
		    aer->log_page_id);
		memcpy(&aer->cpl, cpl, sizeof(*cpl));
		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
		    aer);
		/* Wait to notify consumers until after log page is fetched. */
	} else {
		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
		    NULL, 0);

		/*
		 * Repost another asynchronous event request to replace the one
		 * that just completed.
		 */
		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
	}
}

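/*
 * Allocate a null (no data) request for an Asynchronous Event Request
 * command and submit it on the admin queue.
 */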
static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
	struct nvme_request *req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
	aer->req = req;

	/*
	 * Disable timeout here, since asynchronous event requests should by
	 * nature never be timed out.
	 */
	req->timeout = FALSE;
	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	struct nvme_async_event_request *aer;
	uint32_t i;

	ctrlr->async_event_config.raw = 0xFF;
	ctrlr->async_event_config.bits.reserved = 0;

	status.done = FALSE;
	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
	    0, NULL, 0, nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl) ||
	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
		nvme_printf(ctrlr, "temperature threshold not supported\n");
		ctrlr->async_event_config.bits.temperature = 0;
	}

	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
	    ctrlr->async_event_config, NULL, NULL);

	/* aerl is a zero-based value, so we need to add 1 here. */
	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));

	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
	}
}

static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

	ctrlr->int_coal_time = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
	    &ctrlr->int_coal_time);

	ctrlr->int_coal_threshold = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
	    &ctrlr->int_coal_threshold);

	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
	    ctrlr->int_coal_threshold, NULL, NULL);
}

static void
nvme_ctrlr_start(void *ctrlr_arg)
{
	struct nvme_controller *ctrlr = ctrlr_arg;
	int i;

	nvme_qpair_reset(&ctrlr->adminq);
	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_reset(&ctrlr->ioq[i]);

	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_identify(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	nvme_ctrlr_configure_aer(ctrlr);
	nvme_ctrlr_configure_int_coalescing(ctrlr);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_enable(&ctrlr->ioq[i]);
}

void
nvme_ctrlr_start_config_hook(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_ctrlr_start(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	ctrlr->is_initialized = 1;
	nvme_notify_new_controller(ctrlr);
}

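/*
 * Task that performs a controller reset outside of interrupt context, then
 * either restarts the controller via nvme_ctrlr_start() or marks it failed
 * if the hardware reset did not succeed.
 */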
static void
nvme_ctrlr_reset_task(void *arg, int pending)
{
	struct nvme_controller *ctrlr = arg;
	int status;

	nvme_printf(ctrlr, "resetting controller\n");
	status = nvme_ctrlr_hw_reset(ctrlr);
	/*
	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
	 * handlers on this CPU that were blocked on a qpair lock.  We want
	 * all nvme interrupts completed before proceeding with restarting the
	 * controller.
	 *
	 * XXX - any way to guarantee the interrupt handlers have quiesced?
	 */
	pause("nvmereset", hz / 10);
	if (status == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
}

static void
nvme_ctrlr_intx_handler(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_mmio_write_4(ctrlr, intms, 1);

	nvme_qpair_process_completions(&ctrlr->adminq);

	if (ctrlr->ioq[0].cpl)
		nvme_qpair_process_completions(&ctrlr->ioq[0]);

	nvme_mmio_write_4(ctrlr, intmc, 1);
}

static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

	ctrlr->num_io_queues = 1;
	ctrlr->rid = 0;
	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

	if (ctrlr->res == NULL) {
		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
		return (ENOMEM);
	}

	bus_setup_intr(ctrlr->dev, ctrlr->res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
	    ctrlr, &ctrlr->tag);

	if (ctrlr->tag == NULL) {
		nvme_printf(ctrlr, "unable to setup intx handler\n");
		return (ENOMEM);
	}

	return (0);
}

static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_pt_command *pt = arg;

	bzero(&pt->cpl, sizeof(pt->cpl));
	pt->cpl.cdw0 = cpl->cdw0;
	pt->cpl.status = cpl->status;
	pt->cpl.status.p = 0;

	mtx_lock(pt->driver_lock);
	wakeup(pt);
	mtx_unlock(pt->driver_lock);
}

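/*
 * Execute a passthrough command on behalf of a consumer or the ioctl handler
 * below.  For user buffers the pages are wired via vmapbuf() for the duration
 * of the command, and the caller sleeps on the pt structure until
 * nvme_pt_done() is called.  From userland this is typically reached through
 * the NVME_PASSTHROUGH_CMD ioctl on /dev/nvmeX, roughly (illustrative sketch
 * only, error handling omitted):
 *
 *	struct nvme_pt_command pt;
 *	memset(&pt, 0, sizeof(pt));
 *	pt.cmd.opc = NVME_OPC_GET_LOG_PAGE;	// fill in cmd/buf/len/is_read
 *	ioctl(fd, NVME_PASSTHROUGH_CMD, &pt);
 */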
int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
    struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
    int is_admin_cmd)
{
	struct nvme_request *req;
	struct mtx *mtx;
	struct buf *buf = NULL;
	int ret = 0;

	if (pt->len > 0) {
		if (pt->len > ctrlr->max_xfer_size) {
			nvme_printf(ctrlr, "pt->len (%d) "
			    "exceeds max_xfer_size (%d)\n", pt->len,
			    ctrlr->max_xfer_size);
			return EIO;
		}
		if (is_user_buffer) {
			/*
			 * Ensure the user buffer is wired for the duration of
			 * this passthrough command.
			 */
			PHOLD(curproc);
			buf = getpbuf(NULL);
			buf->b_saveaddr = buf->b_data;
			buf->b_data = pt->buf;
			buf->b_bufsize = pt->len;
			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
			if (vmapbuf(buf, 1) < 0) {
#else
			if (vmapbuf(buf) < 0) {
#endif
				ret = EFAULT;
				goto err;
			}
			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
			    nvme_pt_done, pt);
		} else
			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
			    nvme_pt_done, pt);
	} else
		req = nvme_allocate_request_null(nvme_pt_done, pt);

	req->cmd.opc = pt->cmd.opc;
	req->cmd.cdw10 = pt->cmd.cdw10;
	req->cmd.cdw11 = pt->cmd.cdw11;
	req->cmd.cdw12 = pt->cmd.cdw12;
	req->cmd.cdw13 = pt->cmd.cdw13;
	req->cmd.cdw14 = pt->cmd.cdw14;
	req->cmd.cdw15 = pt->cmd.cdw15;

	req->cmd.nsid = nsid;

	if (is_admin_cmd)
		mtx = &ctrlr->lock;
	else
		mtx = &ctrlr->ns[nsid-1].lock;

	mtx_lock(mtx);
	pt->driver_lock = mtx;

	if (is_admin_cmd)
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	else
		nvme_ctrlr_submit_io_request(ctrlr, req);

	mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
	mtx_unlock(mtx);

	pt->driver_lock = NULL;

err:
	if (buf != NULL) {
		relpbuf(buf, NULL);
		PRELE(curproc);
	}

	return (ret);
}

static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct nvme_controller *ctrlr;
	struct nvme_pt_command *pt;

	ctrlr = cdev->si_drv1;

	switch (cmd) {
	case NVME_RESET_CONTROLLER:
		nvme_ctrlr_reset(ctrlr);
		break;
	case NVME_PASSTHROUGH_CMD:
		pt = (struct nvme_pt_command *)arg;
		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
	default:
		return (ENOTTY);
	}

	return (0);
}

static struct cdevsw nvme_ctrlr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_ioctl =	nvme_ctrlr_ioctl
};

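/*
 * One-time controller construction, called from the bus attach routine:
 * map the registers, read CAP, apply tunables, set up MSI-X (falling back
 * to INTx when vectors or resources are unavailable), construct the admin
 * and I/O queue pairs, and create the /dev/nvmeX device node and the
 * controller taskqueue.
 */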
int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
	union cap_lo_register cap_lo;
	union cap_hi_register cap_hi;
	int i, per_cpu_io_queues, rid;
	int num_vectors_requested, num_vectors_allocated;
	int status, timeout_period;

	ctrlr->dev = dev;

	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);

	status = nvme_ctrlr_allocate_bar(ctrlr);

	if (status != 0)
		return (status);

	/*
	 * Software emulators may set the doorbell stride to something
	 * other than zero, but this driver is not set up to handle that.
	 */
	cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
	if (cap_hi.bits.dstrd != 0)
		return (ENXIO);

	ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);

	/* Get ready timeout value from controller, in units of 500ms. */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;

	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);

	per_cpu_io_queues = 1;
	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);

	if (per_cpu_io_queues)
		ctrlr->num_io_queues = mp_ncpus;
	else
		ctrlr->num_io_queues = 1;

	ctrlr->force_intx = 0;
	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

	ctrlr->enable_aborts = 0;
	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);

	ctrlr->msix_enabled = 1;

	if (ctrlr->force_intx) {
		ctrlr->msix_enabled = 0;
		goto intx;
	}

	/* One vector per IO queue, plus one vector for admin queue. */
	num_vectors_requested = ctrlr->num_io_queues + 1;

	/*
	 * If we cannot even allocate 2 vectors (one for admin, one for
	 * I/O), then revert to INTx.
	 */
	if (pci_msix_count(dev) < 2) {
		ctrlr->msix_enabled = 0;
		goto intx;
	}

	if (pci_msix_count(dev) < num_vectors_requested) {
		ctrlr->num_io_queues = 1;
		num_vectors_requested = 2; /* one for admin, one for I/O */
	}

	num_vectors_allocated = num_vectors_requested;
	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
		ctrlr->msix_enabled = 0;
		goto intx;
	}

	if (num_vectors_allocated < num_vectors_requested) {
		if (num_vectors_allocated < 2) {
			pci_release_msi(dev);
			ctrlr->msix_enabled = 0;
			goto intx;
		}

		ctrlr->num_io_queues = 1;
		/*
		 * Release whatever vectors were allocated, and just
		 * reallocate the two needed for the admin and single
		 * I/O qpair.
		 */
		num_vectors_allocated = 2;
		pci_release_msi(dev);
		if (pci_alloc_msix(dev, &num_vectors_allocated) != 0)
			panic("could not reallocate any vectors\n");
		if (num_vectors_allocated != 2)
			panic("could not reallocate 2 vectors\n");
	}

	/*
	 * On earlier FreeBSD releases, there are reports that
	 * pci_alloc_msix() can return successfully with all vectors
	 * requested, but a subsequent bus_alloc_resource_any()
	 * for one of those vectors fails.  This issue occurs more
	 * readily with multiple devices using per-CPU vectors.
	 * To workaround this issue, try to allocate the resources now,
	 * and fall back to INTx if we cannot allocate all of them.
	 * This issue cannot be reproduced on more recent versions of
	 * FreeBSD which have increased the maximum number of MSI-X
	 * vectors, but adding the workaround makes it easier for
	 * vendors wishing to import this driver into kernels based on
	 * older versions of FreeBSD.
	 */
	for (i = 0; i < num_vectors_allocated; i++) {
		rid = i + 1;
		ctrlr->msi_res[i] = bus_alloc_resource_any(ctrlr->dev,
		    SYS_RES_IRQ, &rid, RF_ACTIVE);

		if (ctrlr->msi_res[i] == NULL) {
			ctrlr->msix_enabled = 0;
			while (i > 0) {
				i--;
				bus_release_resource(ctrlr->dev,
				    SYS_RES_IRQ,
				    rman_get_rid(ctrlr->msi_res[i]),
				    ctrlr->msi_res[i]);
			}
			pci_release_msi(dev);
			nvme_printf(ctrlr, "could not obtain all MSI-X "
			    "resources, reverting to intx\n");
			break;
		}
	}

intx:

	if (!ctrlr->msix_enabled)
		nvme_ctrlr_configure_intx(ctrlr);

	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
	nvme_ctrlr_construct_admin_qpair(ctrlr);
	status = nvme_ctrlr_construct_io_qpairs(ctrlr);

	if (status != 0)
		return (status);

	ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_WHEEL, 0600, "nvme%d", device_get_unit(dev));

	if (ctrlr->cdev == NULL)
		return (ENXIO);

	ctrlr->cdev->si_drv1 = (void *)ctrlr;

	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");

	ctrlr->is_resetting = 0;
	ctrlr->is_initialized = 0;
	ctrlr->notification_sent = 0;
	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);

	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
	STAILQ_INIT(&ctrlr->fail_req);
	ctrlr->is_failed = FALSE;

	return (0);
}

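/*
 * Tear down the controller on driver unload: notify the device of a
 * shutdown, disable it, and release the namespaces, queue pairs, device
 * node, interrupt and memory resources acquired in nvme_ctrlr_construct().
 */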
void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
	int i;

	/*
	 * Notify the controller of a shutdown, even though this is due to
	 * a driver unload, not a system shutdown (this path is not invoked
	 * during shutdown).  This ensures the controller receives a
	 * shutdown notification in case the system is shutdown before
	 * reloading the driver.
	 */
	nvme_ctrlr_shutdown(ctrlr);

	nvme_ctrlr_disable(ctrlr);
	taskqueue_free(ctrlr->taskqueue);

	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
		nvme_ns_destruct(&ctrlr->ns[i]);

	if (ctrlr->cdev)
		destroy_dev(ctrlr->cdev);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
	}

	free(ctrlr->ioq, M_NVME);

	nvme_admin_qpair_destroy(&ctrlr->adminq);

	if (ctrlr->resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->resource_id, ctrlr->resource);
	}

	if (ctrlr->bar4_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
	}

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	if (ctrlr->msix_enabled)
		pci_release_msi(dev);
}

void
nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;
	int ticks = 0;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	cc.bits.shn = NVME_SHN_NORMAL;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);
	while ((csts.bits.shst != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
		pause("nvme shn", 1);
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}
	if (csts.bits.shst != NVME_SHST_COMPLETE)
		nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
		    "of notification\n");
}

void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	nvme_qpair_submit_request(&ctrlr->adminq, req);
}

void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
	struct nvme_qpair *qpair;

	if (ctrlr->num_io_queues > 1)
		qpair = &ctrlr->ioq[curcpu];
	else
		qpair = &ctrlr->ioq[0];

	nvme_qpair_submit_request(qpair, req);
}

device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{

	return (ctrlr->dev);
}

const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{

	return (&ctrlr->cdata);
}