/*-
 * Copyright (C) 2012-2015 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/nvme/nvme_ctrlr.c 293671 2016-01-11 17:31:18Z jimharris $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/uio.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

/*
 * Used for calculating the number of CPUs to assign to each I/O queue and
 * the number of I/O queues to allocate per controller.
 */
#define NVME_CEILING(num, div)	((((num) - 1) / (div)) + 1)

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer);
static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);

static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

	ctrlr->resource_id = PCIR_BAR(0);

	ctrlr->resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE);

	if (ctrlr->resource == NULL) {
		nvme_printf(ctrlr, "unable to allocate pci resource\n");
		return (ENOMEM);
	}

	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed behind
	 * BAR 4/5, separate from the control/doorbell registers.  Always
	 * try to map this bar, because it must be mapped prior to calling
	 * pci_alloc_msix().  If the table isn't behind BAR 4/5,
	 * bus_alloc_resource() will just return NULL which is OK.
	 */
	ctrlr->bar4_resource_id = PCIR_BAR(4);
	ctrlr->bar4_resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->bar4_resource_id, 0, ~0, 1, RF_ACTIVE);

	return (0);
}

static void
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
	struct nvme_qpair *qpair;
	uint32_t num_entries;

	qpair = &ctrlr->adminq;

	num_entries = NVME_ADMIN_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
	/*
	 * If admin_entries was overridden to an invalid value, revert it
	 * back to our default value.
	 */
	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
		    "specified\n", num_entries);
		num_entries = NVME_ADMIN_ENTRIES;
	}

	/*
	 * The admin queue's max xfer size is treated differently than the
	 * max I/O xfer size.  16KB is sufficient here - maybe even less?
	 */
	nvme_qpair_construct(qpair,
	    0, /* qpair ID */
	    0, /* vector */
	    num_entries,
	    NVME_ADMIN_TRACKERS,
	    ctrlr);
}

static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_qpair *qpair;
	union cap_lo_register cap_lo;
	int i, num_entries, num_trackers;

	num_entries = NVME_IO_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);

	/*
	 * NVMe spec sets a hard limit of 64K max entries, but
	 * devices may specify a smaller limit, so we need to check
	 * the MQES field in the capabilities register.
	 */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	num_entries = min(num_entries, cap_lo.bits.mqes + 1);

	num_trackers = NVME_IO_TRACKERS;
	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
	/*
	 * No need to have more trackers than entries in the submit queue.
	 * Note also that for a queue size of N, we can only have (N-1)
	 * commands outstanding, hence the "-1" here.
	 */
	num_trackers = min(num_trackers, (num_entries - 1));

	/*
	 * This was calculated previously when setting up interrupts, but
	 * a controller could theoretically support fewer I/O queues than
	 * MSI-X vectors.  So calculate again here just to be safe.
	 */
	ctrlr->num_cpus_per_ioq = NVME_CEILING(mp_ncpus, ctrlr->num_io_queues);

	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
	    M_NVME, M_ZERO | M_WAITOK);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		/*
		 * Admin queue has ID=0. IO queues start at ID=1 -
		 * hence the 'i+1' here.
		 *
		 * For I/O queues, use the controller-wide max_xfer_size
		 * calculated in nvme_attach().
		 */
		nvme_qpair_construct(qpair,
		    i+1, /* qpair ID */
		    ctrlr->msix_enabled ? i+1 : 0, /* vector */
		    num_entries,
		    num_trackers,
		    ctrlr);

		/*
		 * Do not bother binding interrupts if we only have one I/O
		 * interrupt thread for this controller.
		 */
		if (ctrlr->num_io_queues > 1)
			bus_bind_intr(ctrlr->dev, qpair->res,
			    i * ctrlr->num_cpus_per_ioq);
	}

	return (0);
}

static void
nvme_ctrlr_fail(struct nvme_controller *ctrlr)
{
	int i;

	ctrlr->is_failed = TRUE;
	nvme_qpair_fail(&ctrlr->adminq);
	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_fail(&ctrlr->ioq[i]);
	nvme_notify_fail_consumers(ctrlr);
}

void
nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	mtx_lock(&ctrlr->lock);
	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
	mtx_unlock(&ctrlr->lock);
	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
}

static void
nvme_ctrlr_fail_req_task(void *arg, int pending)
{
	struct nvme_controller *ctrlr = arg;
	struct nvme_request *req;

	mtx_lock(&ctrlr->lock);
	while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
		req = STAILQ_FIRST(&ctrlr->fail_req);
		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
		nvme_qpair_manual_complete_request(req->qpair, req,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
	}
	mtx_unlock(&ctrlr->lock);
}

static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
{
	int ms_waited;
	union cc_register cc;
	union csts_register csts;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en != desired_val) {
		nvme_printf(ctrlr, "%s called with desired_val = %d "
		    "but cc.en = %d\n", __func__, desired_val, cc.bits.en);
		return (ENXIO);
	}

	ms_waited = 0;

	while (csts.bits.rdy != desired_val) {
		DELAY(1000);
		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
			nvme_printf(ctrlr, "controller ready did not become %d "
			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
			return (ENXIO);
		}
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}

	return (0);
}

static void
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en == 1 && csts.bits.rdy == 0)
		nvme_ctrlr_wait_for_ready(ctrlr, 1);

	cc.bits.en = 0;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	DELAY(5000);
	nvme_ctrlr_wait_for_ready(ctrlr, 0);
}

static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;
	union aqa_register aqa;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en == 1) {
		if (csts.bits.rdy == 1)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
	}

	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
	DELAY(5000);
	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
	DELAY(5000);

	aqa.raw = 0;
	/* acqs and asqs are 0-based. */
	aqa.bits.acqs = ctrlr->adminq.num_entries - 1;
	aqa.bits.asqs = ctrlr->adminq.num_entries - 1;
	nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
	DELAY(5000);

	cc.bits.en = 1;
	cc.bits.css = 0;
	cc.bits.ams = 0;
	cc.bits.shn = 0;
	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

	/* This evaluates to 0, which is according to spec. */
	cc.bits.mps = (PAGE_SIZE >> 13);

	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	DELAY(5000);

	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}

int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
{
	int i;

	nvme_admin_qpair_disable(&ctrlr->adminq);
	/*
	 * I/O queues are not allocated before the initial HW
	 * reset, so do not try to disable them.  Use is_initialized
	 * to determine if this is the initial HW reset.
	 */
	if (ctrlr->is_initialized) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_disable(&ctrlr->ioq[i]);
	}

	DELAY(100*1000);

	nvme_ctrlr_disable(ctrlr);
	return (nvme_ctrlr_enable(ctrlr));
}

void
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{
	int cmpset;

	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

	if (cmpset == 0 || ctrlr->is_failed)
		/*
		 * Controller is already resetting or has failed.  Return
		 * immediately since there is no need to kick off another
		 * reset in these cases.
		 */
		return;

	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}

static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;

	status.done = FALSE;
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
		return (ENXIO);
	}

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 * controller supports.
	 */
	if (ctrlr->cdata.mdts > 0)
		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));

	return (0);
}

static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	int cq_allocated, sq_allocated;

	status.done = FALSE;
	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_set_num_queues failed!\n");
		return (ENXIO);
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (status.cpl.cdw0 >> 16) + 1;

	/*
	 * Controller may allocate more queues than we requested,
	 * so use the minimum of the number requested and what was
	 * actually allocated.
416 */ 417 ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated); 418 ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated); 419 420 return (0); 421} 422 423static int 424nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr) 425{ 426 struct nvme_completion_poll_status status; 427 struct nvme_qpair *qpair; 428 int i; 429 430 for (i = 0; i < ctrlr->num_io_queues; i++) { 431 qpair = &ctrlr->ioq[i]; 432 433 status.done = FALSE; 434 nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector, 435 nvme_completion_poll_cb, &status); 436 while (status.done == FALSE) 437 pause("nvme", 1); 438 if (nvme_completion_is_error(&status.cpl)) { 439 nvme_printf(ctrlr, "nvme_create_io_cq failed!\n"); 440 return (ENXIO); 441 } 442 443 status.done = FALSE; 444 nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair, 445 nvme_completion_poll_cb, &status); 446 while (status.done == FALSE) 447 pause("nvme", 1); 448 if (nvme_completion_is_error(&status.cpl)) { 449 nvme_printf(ctrlr, "nvme_create_io_sq failed!\n"); 450 return (ENXIO); 451 } 452 } 453 454 return (0); 455} 456 457static int 458nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr) 459{ 460 struct nvme_namespace *ns; 461 int i, status; 462 463 for (i = 0; i < ctrlr->cdata.nn; i++) { 464 ns = &ctrlr->ns[i]; 465 status = nvme_ns_construct(ns, i+1, ctrlr); 466 if (status != 0) 467 return (status); 468 } 469 470 return (0); 471} 472 473static boolean_t 474is_log_page_id_valid(uint8_t page_id) 475{ 476 477 switch (page_id) { 478 case NVME_LOG_ERROR: 479 case NVME_LOG_HEALTH_INFORMATION: 480 case NVME_LOG_FIRMWARE_SLOT: 481 return (TRUE); 482 } 483 484 return (FALSE); 485} 486 487static uint32_t 488nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id) 489{ 490 uint32_t log_page_size; 491 492 switch (page_id) { 493 case NVME_LOG_ERROR: 494 log_page_size = min( 495 sizeof(struct nvme_error_information_entry) * 496 ctrlr->cdata.elpe, 497 NVME_MAX_AER_LOG_SIZE); 498 break; 499 case NVME_LOG_HEALTH_INFORMATION: 500 log_page_size = sizeof(struct nvme_health_information_page); 501 break; 502 case NVME_LOG_FIRMWARE_SLOT: 503 log_page_size = sizeof(struct nvme_firmware_page); 504 break; 505 default: 506 log_page_size = 0; 507 break; 508 } 509 510 return (log_page_size); 511} 512 513static void 514nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr, 515 union nvme_critical_warning_state state) 516{ 517 518 if (state.bits.available_spare == 1) 519 nvme_printf(ctrlr, "available spare space below threshold\n"); 520 521 if (state.bits.temperature == 1) 522 nvme_printf(ctrlr, "temperature above threshold\n"); 523 524 if (state.bits.device_reliability == 1) 525 nvme_printf(ctrlr, "device reliability degraded\n"); 526 527 if (state.bits.read_only == 1) 528 nvme_printf(ctrlr, "media placed in read only mode\n"); 529 530 if (state.bits.volatile_memory_backup == 1) 531 nvme_printf(ctrlr, "volatile memory backup device failed\n"); 532 533 if (state.bits.reserved != 0) 534 nvme_printf(ctrlr, 535 "unknown critical warning(s): state = 0x%02x\n", state.raw); 536} 537 538static void 539nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl) 540{ 541 struct nvme_async_event_request *aer = arg; 542 struct nvme_health_information_page *health_info; 543 544 /* 545 * If the log page fetch for some reason completed with an error, 546 * don't pass log page data to the consumers. In practice, this case 547 * should never happen. 
	 */
	if (nvme_completion_is_error(cpl))
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, NULL, 0);
	else {
		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
			health_info = (struct nvme_health_information_page *)
			    aer->log_page_buffer;
			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
			    health_info->critical_warning);
			/*
			 * Critical warnings reported through the
			 * SMART/health log page are persistent, so
			 * clear the associated bits in the async event
			 * config so that we do not receive repeated
			 * notifications for the same event.
			 */
			aer->ctrlr->async_event_config.raw &=
			    ~health_info->critical_warning.raw;
			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
			    aer->ctrlr->async_event_config, NULL, NULL);
		}

		/*
		 * Pass the cpl data from the original async event completion,
		 * not the log page fetch.
		 */
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
	}

	/*
	 * Repost another asynchronous event request to replace the one
	 * that just completed.
	 */
	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
}

static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request *aer = arg;

	if (nvme_completion_is_error(cpl)) {
		/*
		 * Do not retry failed async event requests.  This avoids
		 * infinite loops where a new async event request is submitted
		 * to replace the one just failed, only to fail again and
		 * perpetuate the loop.
		 */
		return;
	}

	/* Associated log page is in bits 23:16 of completion entry dw0. */
	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;

	nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
	    aer->log_page_id);

	if (is_log_page_id_valid(aer->log_page_id)) {
		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
		    aer->log_page_id);
		memcpy(&aer->cpl, cpl, sizeof(*cpl));
		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
		    aer);
		/* Wait to notify consumers until after log page is fetched. */
	} else {
		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
		    NULL, 0);

		/*
		 * Repost another asynchronous event request to replace the one
		 * that just completed.
		 */
		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
	}
}

static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
	struct nvme_request *req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
	aer->req = req;

	/*
	 * Disable timeout here, since asynchronous event requests should by
	 * nature never be timed out.
	 */
	req->timeout = FALSE;
	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	struct nvme_async_event_request *aer;
	uint32_t i;

	ctrlr->async_event_config.raw = 0xFF;
	ctrlr->async_event_config.bits.reserved = 0;

	status.done = FALSE;
	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
	    0, NULL, 0, nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl) ||
	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
		nvme_printf(ctrlr, "temperature threshold not supported\n");
		ctrlr->async_event_config.bits.temperature = 0;
	}

	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
	    ctrlr->async_event_config, NULL, NULL);

	/* aerl is a zero-based value, so we need to add 1 here. */
	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));

	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
	}
}

static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

	ctrlr->int_coal_time = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
	    &ctrlr->int_coal_time);

	ctrlr->int_coal_threshold = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
	    &ctrlr->int_coal_threshold);

	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
	    ctrlr->int_coal_threshold, NULL, NULL);
}

static void
nvme_ctrlr_start(void *ctrlr_arg)
{
	struct nvme_controller *ctrlr = ctrlr_arg;
	uint32_t old_num_io_queues;
	int i;

	/*
	 * Only reset adminq here when we are restarting the
	 * controller after a reset.  During initialization,
	 * we have already submitted admin commands to get
	 * the number of I/O queues supported, so cannot reset
	 * the adminq again here.
	 */
	if (ctrlr->is_resetting) {
		nvme_qpair_reset(&ctrlr->adminq);
	}

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_reset(&ctrlr->ioq[i]);

	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_identify(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	/*
	 * The number of qpairs is determined during controller initialization,
	 * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
	 * HW limit.  We call SET_FEATURES again here so that it gets called
	 * after any reset for controllers that depend on the driver to
	 * explicitly specify how many queues it will use.  This value should
	 * never change between resets, so panic if somehow that does happen.
	 */
	old_num_io_queues = ctrlr->num_io_queues;
	if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (old_num_io_queues != ctrlr->num_io_queues) {
		panic("num_io_queues changed from %u to %u", old_num_io_queues,
		    ctrlr->num_io_queues);
	}

	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	nvme_ctrlr_configure_aer(ctrlr);
	nvme_ctrlr_configure_int_coalescing(ctrlr);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_enable(&ctrlr->ioq[i]);
}

void
nvme_ctrlr_start_config_hook(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_qpair_reset(&ctrlr->adminq);
	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	nvme_sysctl_initialize_ctrlr(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	ctrlr->is_initialized = 1;
	nvme_notify_new_controller(ctrlr);
}

static void
nvme_ctrlr_reset_task(void *arg, int pending)
{
	struct nvme_controller *ctrlr = arg;
	int status;

	nvme_printf(ctrlr, "resetting controller\n");
	status = nvme_ctrlr_hw_reset(ctrlr);
	/*
	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
	 * handlers on this CPU that were blocked on a qpair lock.  We want
	 * all nvme interrupts completed before proceeding with restarting the
	 * controller.
	 *
	 * XXX - any way to guarantee the interrupt handlers have quiesced?
	 */
	pause("nvmereset", hz / 10);
	if (status == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
}

static void
nvme_ctrlr_intx_handler(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_mmio_write_4(ctrlr, intms, 1);

	nvme_qpair_process_completions(&ctrlr->adminq);

	if (ctrlr->ioq[0].cpl)
		nvme_qpair_process_completions(&ctrlr->ioq[0]);

	nvme_mmio_write_4(ctrlr, intmc, 1);
}

static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

	ctrlr->msix_enabled = 0;
	ctrlr->num_io_queues = 1;
	ctrlr->num_cpus_per_ioq = mp_ncpus;
	ctrlr->rid = 0;
	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

	if (ctrlr->res == NULL) {
		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
		return (ENOMEM);
	}

	bus_setup_intr(ctrlr->dev, ctrlr->res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
	    ctrlr, &ctrlr->tag);

	if (ctrlr->tag == NULL) {
		nvme_printf(ctrlr, "unable to setup intx handler\n");
		return (ENOMEM);
	}

	return (0);
}

static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_pt_command *pt = arg;

	bzero(&pt->cpl, sizeof(pt->cpl));
	pt->cpl.cdw0 = cpl->cdw0;
	pt->cpl.status = cpl->status;
	pt->cpl.status.p = 0;

	mtx_lock(pt->driver_lock);
	wakeup(pt);
	mtx_unlock(pt->driver_lock);
}

int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
    struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
    int is_admin_cmd)
{
	struct nvme_request *req;
	struct mtx *mtx;
	struct buf *buf = NULL;
	int ret = 0;

	if (pt->len > 0) {
		if (pt->len > ctrlr->max_xfer_size) {
			nvme_printf(ctrlr, "pt->len (%d) "
			    "exceeds max_xfer_size (%d)\n", pt->len,
			    ctrlr->max_xfer_size);
			return EIO;
		}
		if (is_user_buffer) {
			/*
			 * Ensure the user buffer is wired for the duration of
			 * this passthrough command.
			 */
			PHOLD(curproc);
			buf = getpbuf(NULL);
			buf->b_saveaddr = buf->b_data;
			buf->b_data = pt->buf;
			buf->b_bufsize = pt->len;
			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
			if (vmapbuf(buf, 1) < 0) {
#else
			if (vmapbuf(buf) < 0) {
#endif
				ret = EFAULT;
				goto err;
			}
			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
			    nvme_pt_done, pt);
		} else
			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
			    nvme_pt_done, pt);
	} else
		req = nvme_allocate_request_null(nvme_pt_done, pt);

	req->cmd.opc = pt->cmd.opc;
	req->cmd.cdw10 = pt->cmd.cdw10;
	req->cmd.cdw11 = pt->cmd.cdw11;
	req->cmd.cdw12 = pt->cmd.cdw12;
	req->cmd.cdw13 = pt->cmd.cdw13;
	req->cmd.cdw14 = pt->cmd.cdw14;
	req->cmd.cdw15 = pt->cmd.cdw15;

	req->cmd.nsid = nsid;

	if (is_admin_cmd)
		mtx = &ctrlr->lock;
	else
		mtx = &ctrlr->ns[nsid-1].lock;

	mtx_lock(mtx);
	pt->driver_lock = mtx;

	if (is_admin_cmd)
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	else
		nvme_ctrlr_submit_io_request(ctrlr, req);

	mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
	mtx_unlock(mtx);

	pt->driver_lock = NULL;

err:
	if (buf != NULL) {
		relpbuf(buf, NULL);
		PRELE(curproc);
	}

	return (ret);
}

static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct nvme_controller *ctrlr;
	struct nvme_pt_command *pt;

	ctrlr = cdev->si_drv1;

	switch (cmd) {
	case NVME_RESET_CONTROLLER:
		nvme_ctrlr_reset(ctrlr);
		break;
	case NVME_PASSTHROUGH_CMD:
		pt = (struct nvme_pt_command *)arg;
		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
	default:
		return (ENOTTY);
	}

	return (0);
}

static struct cdevsw nvme_ctrlr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_ioctl =	nvme_ctrlr_ioctl
};

static void
nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
{
	device_t dev;
	int per_cpu_io_queues;
	int num_vectors_requested, num_vectors_allocated;
	int num_vectors_available;

	dev = ctrlr->dev;
	per_cpu_io_queues = 1;
	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);

	ctrlr->force_intx = 0;
	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

	/*
	 * FreeBSD currently cannot allocate more than about 190 vectors at
	 * boot, meaning that systems with high core count and many devices
	 * requesting per-CPU interrupt vectors will not get their full
	 * allotment.  So first, try to allocate as many as we may need to
	 * understand what is available, then immediately release them.
	 * Then figure out how many of those we will actually use, based on
	 * assigning an equal number of cores to each I/O queue.
	 */

	/* One vector per per-core I/O queue, plus one vector for the admin queue. */
	num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
	if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
		num_vectors_available = 0;
	}
	pci_release_msi(dev);

	if (ctrlr->force_intx || num_vectors_available < 2) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	/* One of the available vectors is reserved for the admin queue. */
	if (per_cpu_io_queues)
		ctrlr->num_cpus_per_ioq = NVME_CEILING(mp_ncpus,
		    num_vectors_available - 1);
	else
		ctrlr->num_cpus_per_ioq = mp_ncpus;

	ctrlr->num_io_queues = NVME_CEILING(mp_ncpus, ctrlr->num_cpus_per_ioq);
	num_vectors_requested = ctrlr->num_io_queues + 1;
	num_vectors_allocated = num_vectors_requested;

	/*
	 * Now just allocate the number of vectors we need.  This should
	 * succeed, since we previously called pci_alloc_msix()
	 * successfully returning at least this many vectors, but just to
	 * be safe, if something goes wrong just revert to INTx.
	 */
	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	if (num_vectors_allocated < num_vectors_requested) {
		pci_release_msi(dev);
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	ctrlr->msix_enabled = 1;
}

int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
	union cap_lo_register cap_lo;
	union cap_hi_register cap_hi;
	int status, timeout_period;

	ctrlr->dev = dev;

	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);

	status = nvme_ctrlr_allocate_bar(ctrlr);

	if (status != 0)
		return (status);

	/*
	 * Software emulators may set the doorbell stride to something
	 * other than zero, but this driver is not set up to handle that.
	 */
	cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
	if (cap_hi.bits.dstrd != 0)
		return (ENXIO);

	ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);

	/* Get ready timeout value from controller, in units of 500ms. */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;

	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);

	ctrlr->enable_aborts = 0;
	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);

	nvme_ctrlr_setup_interrupts(ctrlr);

	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
	nvme_ctrlr_construct_admin_qpair(ctrlr);

	ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_WHEEL, 0600, "nvme%d", device_get_unit(dev));

	if (ctrlr->cdev == NULL)
		return (ENXIO);

	ctrlr->cdev->si_drv1 = (void *)ctrlr;

	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");

	ctrlr->is_resetting = 0;
	ctrlr->is_initialized = 0;
	ctrlr->notification_sent = 0;
	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);

	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
	STAILQ_INIT(&ctrlr->fail_req);
	ctrlr->is_failed = FALSE;

	return (0);
}

void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
	int i;

	/*
	 * Notify the controller of a shutdown, even though this is due to
	 * a driver unload, not a system shutdown (this path is not invoked
	 * during shutdown).  This ensures the controller receives a
	 * shutdown notification in case the system is shutdown before
	 * reloading the driver.
	 */
	nvme_ctrlr_shutdown(ctrlr);

	nvme_ctrlr_disable(ctrlr);
	taskqueue_free(ctrlr->taskqueue);

	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
		nvme_ns_destruct(&ctrlr->ns[i]);

	if (ctrlr->cdev)
		destroy_dev(ctrlr->cdev);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
	}

	free(ctrlr->ioq, M_NVME);

	nvme_admin_qpair_destroy(&ctrlr->adminq);

	if (ctrlr->resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->resource_id, ctrlr->resource);
	}

	if (ctrlr->bar4_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
	}

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	if (ctrlr->msix_enabled)
		pci_release_msi(dev);
}

void
nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;
	int ticks = 0;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	cc.bits.shn = NVME_SHN_NORMAL;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);
	while ((csts.bits.shst != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
		pause("nvme shn", 1);
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}
	if (csts.bits.shst != NVME_SHST_COMPLETE)
		nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
		    "of notification\n");
}

void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	nvme_qpair_submit_request(&ctrlr->adminq, req);
}

void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
	struct nvme_qpair *qpair;

	qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
	nvme_qpair_submit_request(qpair, req);
}

device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{

	return (ctrlr->dev);
}

const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{

	return (&ctrlr->cdata);
}