/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/nvme/nvme_ctrlr.c 248737 2013-03-26 18:37:36Z jimharris $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/smp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer);

static void
nvme_ctrlr_cb(void *arg, const struct nvme_completion *status)
{
	struct nvme_completion	*cpl = arg;
	struct mtx		*mtx;

	/*
	 * Copy status into the argument passed by the caller, so that
	 * the caller can check the status to determine if the
	 * request passed or failed.
	 */
	memcpy(cpl, status, sizeof(*cpl));
	mtx = mtx_pool_find(mtxpool_sleep, cpl);
	mtx_lock(mtx);
	wakeup(cpl);
	mtx_unlock(mtx);
}

static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

	/* Chatham puts the NVMe MMRs behind BAR 2/3, not BAR 0/1. */
	if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
		ctrlr->resource_id = PCIR_BAR(2);
	else
		ctrlr->resource_id = PCIR_BAR(0);

	ctrlr->resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE);

	if (ctrlr->resource == NULL) {
		device_printf(ctrlr->dev, "unable to allocate pci resource\n");
		return (ENOMEM);
	}

	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed behind
	 * BAR 4/5, separate from the control/doorbell registers.  Always
	 * try to map this bar, because it must be mapped prior to calling
	 * pci_alloc_msix().  If the table isn't behind BAR 4/5,
	 * bus_alloc_resource() will just return NULL which is OK.
	 */
	ctrlr->bar4_resource_id = PCIR_BAR(4);
	ctrlr->bar4_resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->bar4_resource_id, 0, ~0, 1, RF_ACTIVE);

	return (0);
}
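/*
 * Chatham appears to be Intel's pre-production NVMe prototype hardware
 * (note the 0x8086 vendor ID stamped into its identify data below).  The
 * CHATHAM2-guarded code that follows papers over its quirks: a
 * non-standard BAR layout, hand-programmed flash/DDR timing registers,
 * and garbage IDENTIFY_CONTROLLER data.
 */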
#ifdef CHATHAM2
static int
nvme_ctrlr_allocate_chatham_bar(struct nvme_controller *ctrlr)
{

	ctrlr->chatham_resource_id = PCIR_BAR(CHATHAM_CONTROL_BAR);
	ctrlr->chatham_resource = bus_alloc_resource(ctrlr->dev,
	    SYS_RES_MEMORY, &ctrlr->chatham_resource_id, 0, ~0, 1,
	    RF_ACTIVE);

	if (ctrlr->chatham_resource == NULL) {
		device_printf(ctrlr->dev, "unable to alloc pci resource\n");
		return (ENOMEM);
	}

	ctrlr->chatham_bus_tag = rman_get_bustag(ctrlr->chatham_resource);
	ctrlr->chatham_bus_handle =
	    rman_get_bushandle(ctrlr->chatham_resource);

	return (0);
}

static void
nvme_ctrlr_setup_chatham(struct nvme_controller *ctrlr)
{
	uint64_t reg1, reg2, reg3;
	uint64_t temp1, temp2;
	uint32_t temp3;
	uint32_t use_flash_timings = 0;

	DELAY(10000);

	temp3 = chatham_read_4(ctrlr, 0x8080);

	device_printf(ctrlr->dev, "Chatham version: 0x%x\n", temp3);

	ctrlr->chatham_lbas = chatham_read_4(ctrlr, 0x8068) - 0x110;
	ctrlr->chatham_size = ctrlr->chatham_lbas * 512;

	device_printf(ctrlr->dev, "Chatham size: %jd\n",
	    (intmax_t)ctrlr->chatham_size);

	reg1 = reg2 = reg3 = ctrlr->chatham_size - 1;

	TUNABLE_INT_FETCH("hw.nvme.use_flash_timings", &use_flash_timings);
	if (use_flash_timings) {
		device_printf(ctrlr->dev, "Chatham: using flash timings\n");
		temp1 = 0x00001b58000007d0LL;
		temp2 = 0x000000cb00000131LL;
	} else {
		device_printf(ctrlr->dev, "Chatham: using DDR timings\n");
		temp1 = temp2 = 0x0LL;
	}

	chatham_write_8(ctrlr, 0x8000, reg1);
	chatham_write_8(ctrlr, 0x8008, reg2);
	chatham_write_8(ctrlr, 0x8010, reg3);

	chatham_write_8(ctrlr, 0x8020, temp1);
	temp3 = chatham_read_4(ctrlr, 0x8020);

	chatham_write_8(ctrlr, 0x8028, temp2);
	temp3 = chatham_read_4(ctrlr, 0x8028);

	chatham_write_8(ctrlr, 0x8030, temp1);
	chatham_write_8(ctrlr, 0x8038, temp2);
	chatham_write_8(ctrlr, 0x8040, temp1);
	chatham_write_8(ctrlr, 0x8048, temp2);
	chatham_write_8(ctrlr, 0x8050, temp1);
	chatham_write_8(ctrlr, 0x8058, temp2);

	DELAY(10000);
}

static void
nvme_chatham_populate_cdata(struct nvme_controller *ctrlr)
{
	struct nvme_controller_data *cdata;

	cdata = &ctrlr->cdata;

	cdata->vid = 0x8086;
	cdata->ssvid = 0x2011;

	/*
	 * Chatham2 puts garbage data in these fields when we
	 * invoke IDENTIFY_CONTROLLER, so we need to re-zero
	 * the fields before copying in the replacement data.
	 */
	memset(cdata->sn, 0, sizeof(cdata->sn));
	memcpy(cdata->sn, "2012", strlen("2012"));
	memset(cdata->mn, 0, sizeof(cdata->mn));
	memcpy(cdata->mn, "CHATHAM2", strlen("CHATHAM2"));
	memset(cdata->fr, 0, sizeof(cdata->fr));
	memcpy(cdata->fr, "0", strlen("0"));
	cdata->rab = 8;
	cdata->aerl = 3;
	cdata->lpa.ns_smart = 1;
	cdata->sqes.min = 6;	/* SQ entry size == 64 == 2^6 */
	cdata->sqes.max = 6;
	cdata->cqes.min = 4;	/* CQ entry size == 16 == 2^4 */
	cdata->cqes.max = 4;
	cdata->nn = 1;

	/* Chatham2 doesn't support the DSM command. */
	cdata->oncs.dsm = 0;

	cdata->vwc.present = 1;
}
#endif /* CHATHAM2 */
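/*
 * Queue sizing below is driven by loader tunables fetched with
 * TUNABLE_INT_FETCH().  For example, in /boot/loader.conf (the values
 * here are illustrative only, clamped to the MIN/MAX limits checked in
 * the code):
 *
 *	hw.nvme.admin_entries=256	# admin queue depth
 *	hw.nvme.io_entries=1024		# I/O queue depth, capped at CAP.MQES+1
 *	hw.nvme.io_trackers=128		# outstanding commands per I/O queue
 */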
static void
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
	struct nvme_qpair	*qpair;
	uint32_t		num_entries;

	qpair = &ctrlr->adminq;

	num_entries = NVME_ADMIN_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
	/*
	 * If admin_entries was overridden to an invalid value, revert it
	 * back to our default value.
	 */
	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
		printf("nvme: invalid hw.nvme.admin_entries=%d specified\n",
		    num_entries);
		num_entries = NVME_ADMIN_ENTRIES;
	}

	/*
	 * The admin queue's max xfer size is treated differently than the
	 * max I/O xfer size.  16KB is sufficient here - maybe even less?
	 */
	nvme_qpair_construct(qpair,
			     0, /* qpair ID */
			     0, /* vector */
			     num_entries,
			     NVME_ADMIN_TRACKERS,
			     16*1024, /* max xfer size */
			     ctrlr);
}

static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_qpair	*qpair;
	union cap_lo_register	cap_lo;
	int			i, num_entries, num_trackers;

	num_entries = NVME_IO_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);

	/*
	 * NVMe spec sets a hard limit of 64K max entries, but
	 * devices may specify a smaller limit, so we need to check
	 * the MQES field in the capabilities register.
	 */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	num_entries = min(num_entries, cap_lo.bits.mqes+1);

	num_trackers = NVME_IO_TRACKERS;
	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
	/*
	 * No need to have more trackers than entries in the submit queue.
	 * Note also that for a queue size of N, we can only have (N-1)
	 * commands outstanding, hence the "-1" here.
	 */
	num_trackers = min(num_trackers, (num_entries-1));

	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
	TUNABLE_INT_FETCH("hw.nvme.max_xfer_size", &ctrlr->max_xfer_size);
	/*
	 * Check that tunable doesn't specify a size greater than what our
	 * driver supports, and is an even PAGE_SIZE multiple.
	 */
	if (ctrlr->max_xfer_size > NVME_MAX_XFER_SIZE ||
	    ctrlr->max_xfer_size % PAGE_SIZE)
		ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;

	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
	    M_NVME, M_ZERO | M_NOWAIT);

	if (ctrlr->ioq == NULL)
		return (ENOMEM);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		/*
		 * Admin queue has ID=0. IO queues start at ID=1 -
		 * hence the 'i+1' here.
		 *
		 * For I/O queues, use the controller-wide max_xfer_size
		 * calculated in nvme_attach().
		 */
		nvme_qpair_construct(qpair,
				     i+1, /* qpair ID */
				     ctrlr->msix_enabled ? i+1 : 0, /* vector */
				     num_entries,
				     num_trackers,
				     ctrlr->max_xfer_size,
				     ctrlr);

		if (ctrlr->per_cpu_io_queues)
			bus_bind_intr(ctrlr->dev, qpair->res, i);
	}

	return (0);
}
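/*
 * The controller signals readiness by setting CSTS.RDY after CC.EN is
 * set.  The spec bounds this transition by CAP.TO, reported in 500ms
 * units; nvme_ctrlr_construct() converts it to the millisecond
 * ready_timeout_in_ms value polled against below.
 */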
static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr)
{
	int ms_waited;
	union cc_register cc;
	union csts_register csts;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (!cc.bits.en) {
		device_printf(ctrlr->dev, "%s called with cc.en = 0\n",
		    __func__);
		return (ENXIO);
	}

	ms_waited = 0;

	while (!csts.bits.rdy) {
		DELAY(1000);
		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
			device_printf(ctrlr->dev, "controller did not become "
			    "ready within %d ms\n", ctrlr->ready_timeout_in_ms);
			return (ENXIO);
		}
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}

	return (0);
}

static void
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en == 1 && csts.bits.rdy == 0)
		nvme_ctrlr_wait_for_ready(ctrlr);

	cc.bits.en = 0;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	DELAY(5000);
}

static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;
	union aqa_register aqa;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en == 1) {
		if (csts.bits.rdy == 1)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr));
	}

	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
	DELAY(5000);
	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
	DELAY(5000);

	aqa.raw = 0;
	/* acqs and asqs are 0-based. */
	aqa.bits.acqs = ctrlr->adminq.num_entries-1;
	aqa.bits.asqs = ctrlr->adminq.num_entries-1;
	nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
	DELAY(5000);

	cc.bits.en = 1;
	cc.bits.css = 0;
	cc.bits.ams = 0;
	cc.bits.shn = 0;
	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

	/*
	 * MPS encodes the memory page size as 2^(12+MPS).  For a 4KB
	 * PAGE_SIZE this expression evaluates to 0, which is the correct
	 * encoding per the spec.
	 */
	cc.bits.mps = (PAGE_SIZE >> 13);

	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	DELAY(5000);

	return (nvme_ctrlr_wait_for_ready(ctrlr));
}
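/*
 * Resetting the controller is simply a disable followed by a re-enable.
 * No admin queue register state needs to be saved across the reset,
 * since nvme_ctrlr_enable() reprograms ASQ/ACQ/AQA unconditionally
 * whenever it finds the controller disabled.
 */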
int
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{

	nvme_ctrlr_disable(ctrlr);
	return (nvme_ctrlr_enable(ctrlr));
}

/*
 * The admin commands below are issued synchronously: the completion is
 * allocated on the caller's stack, nvme_ctrlr_cb() finds the same pool
 * mutex by hashing the completion's address, and its wakeup() releases
 * the msleep() here.
 */
static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
	struct mtx *mtx;
	struct nvme_completion cpl;
	int status;

	mtx = mtx_pool_find(mtxpool_sleep, &cpl);

	mtx_lock(mtx);
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_ctrlr_cb, &cpl);
	status = msleep(&cpl, mtx, PRIBIO, "nvme_start", hz*5);
	mtx_unlock(mtx);
	if ((status != 0) || cpl.sf_sc || cpl.sf_sct) {
		printf("nvme_identify_controller failed!\n");
		return (ENXIO);
	}

#ifdef CHATHAM2
	if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
		nvme_chatham_populate_cdata(ctrlr);
#endif

	return (0);
}

static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
	struct mtx *mtx;
	struct nvme_completion cpl;
	int cq_allocated, sq_allocated, status;

	mtx = mtx_pool_find(mtxpool_sleep, &cpl);

	mtx_lock(mtx);
	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
	    nvme_ctrlr_cb, &cpl);
	status = msleep(&cpl, mtx, PRIBIO, "nvme_start", hz*5);
	mtx_unlock(mtx);
	if ((status != 0) || cpl.sf_sc || cpl.sf_sct) {
		printf("nvme_set_num_queues failed!\n");
		return (ENXIO);
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
	sq_allocated = (cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (cpl.cdw0 >> 16) + 1;

	/*
	 * Check that the controller was able to allocate the number of
	 * queues we requested.  If not, revert to one IO queue.
	 */
	if (sq_allocated < ctrlr->num_io_queues ||
	    cq_allocated < ctrlr->num_io_queues) {
		ctrlr->num_io_queues = 1;
		ctrlr->per_cpu_io_queues = 0;

		/*
		 * TODO: destroy the extra queues that were created
		 * previously but are no longer needed.
		 */
	}

	return (0);
}
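/*
 * I/O queue pairs are created in two steps, and the order matters: the
 * completion queue must exist before the submission queue that posts to
 * it, since the create-SQ command names its target CQ.
 */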
static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
	struct mtx *mtx;
	struct nvme_qpair *qpair;
	struct nvme_completion cpl;
	int i, status;

	mtx = mtx_pool_find(mtxpool_sleep, &cpl);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		mtx_lock(mtx);
		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
		    nvme_ctrlr_cb, &cpl);
		status = msleep(&cpl, mtx, PRIBIO, "nvme_start", hz*5);
		mtx_unlock(mtx);
		if ((status != 0) || cpl.sf_sc || cpl.sf_sct) {
			printf("nvme_create_io_cq failed!\n");
			return (ENXIO);
		}

		mtx_lock(mtx);
		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
		    nvme_ctrlr_cb, &cpl);
		status = msleep(&cpl, mtx, PRIBIO, "nvme_start", hz*5);
		mtx_unlock(mtx);
		if ((status != 0) || cpl.sf_sc || cpl.sf_sct) {
			printf("nvme_create_io_sq failed!\n");
			return (ENXIO);
		}
	}

	return (0);
}

static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
	struct nvme_namespace *ns;
	int i, status;

	for (i = 0; i < ctrlr->cdata.nn; i++) {
		ns = &ctrlr->ns[i];
		status = nvme_ns_construct(ns, i+1, ctrlr);
		if (status != 0)
			return (status);
	}

	return (0);
}

static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request *aer = arg;

	if (cpl->sf_sc == NVME_SC_ABORTED_SQ_DELETION) {
		/*
		 * This status is simulated when the controller is being
		 * shut down, to effectively abort outstanding asynchronous
		 * event requests and make sure all memory is freed.  Do
		 * not repost the request in this case.
		 */
		return;
	}

	/* TODO: decode async event type based on status */

	/*
	 * Repost another asynchronous event request to replace the one that
	 * just completed.
	 */
	printf("Asynchronous event occurred.\n");
	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
}

static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
	struct nvme_request *req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request(NULL, 0, nvme_ctrlr_async_event_cb, aer);
	aer->req = req;

	/*
	 * Override default timeout value here, since asynchronous event
	 * requests should by nature never be timed out.
	 */
	req->timeout = 0;
	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
	union nvme_critical_warning_state state;
	struct nvme_async_event_request *aer;
	uint32_t i;

	state.raw = 0xFF;
	state.bits.reserved = 0;
	nvme_ctrlr_cmd_set_async_event_config(ctrlr, state, NULL, NULL);

	/* aerl is a zero-based value, so we need to add 1 here. */
	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));

	/* Chatham doesn't support AERs. */
	if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
		ctrlr->num_aers = 0;

	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
	}
}
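/*
 * Interrupt coalescing defaults to off (time and threshold both 0) and
 * can be enabled via the hw.nvme.int_coal_time and
 * hw.nvme.int_coal_threshold loader tunables.  The Set Features command
 * built by nvme_ctrlr_cmd_set_interrupt_coalescing() carries both values
 * to the controller.
 */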
static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

	ctrlr->int_coal_time = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
	    &ctrlr->int_coal_time);

	ctrlr->int_coal_threshold = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
	    &ctrlr->int_coal_threshold);

	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
	    ctrlr->int_coal_threshold, NULL, NULL);
}

void
nvme_ctrlr_start(void *ctrlr_arg)
{
	struct nvme_controller *ctrlr = ctrlr_arg;

	if (nvme_ctrlr_identify(ctrlr) != 0)
		goto err;

	if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0)
		goto err;

	if (nvme_ctrlr_create_qpairs(ctrlr) != 0)
		goto err;

	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0)
		goto err;

	nvme_ctrlr_configure_aer(ctrlr);
	nvme_ctrlr_configure_int_coalescing(ctrlr);

	ctrlr->is_started = TRUE;

err:

	/*
	 * Initialize sysctls, even if the controller failed to start, to
	 * assist with debugging the admin queue pair.
	 */
	nvme_sysctl_initialize_ctrlr(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);
}

static void
nvme_ctrlr_intx_handler(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	/*
	 * Mask interrupts (INTMS) while draining the completion queues,
	 * then clear the mask (INTMC) on the way out.
	 */
	nvme_mmio_write_4(ctrlr, intms, 1);

	nvme_qpair_process_completions(&ctrlr->adminq);

	if (ctrlr->ioq[0].cpl)
		nvme_qpair_process_completions(&ctrlr->ioq[0]);

	nvme_mmio_write_4(ctrlr, intmc, 1);
}

static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

	ctrlr->num_io_queues = 1;
	ctrlr->per_cpu_io_queues = 0;
	ctrlr->rid = 0;
	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

	if (ctrlr->res == NULL) {
		device_printf(ctrlr->dev, "unable to allocate shared IRQ\n");
		return (ENOMEM);
	}

	bus_setup_intr(ctrlr->dev, ctrlr->res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
	    ctrlr, &ctrlr->tag);

	if (ctrlr->tag == NULL) {
		device_printf(ctrlr->dev,
		    "unable to setup legacy interrupt handler\n");
		return (ENOMEM);
	}

	return (0);
}
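/*
 * Userland reaches the controller through the /dev/nvme%d character
 * device created in nvme_ctrlr_construct().  A minimal (hypothetical)
 * consumer of the identify ioctl looks like:
 *
 *	int fd = open("/dev/nvme0", O_RDWR);
 *	struct nvme_controller_data cdata;
 *	if (ioctl(fd, NVME_IDENTIFY_CONTROLLER, &cdata) == 0)
 *		... cdata now holds freshly refreshed identify data ...
 */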
static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct nvme_controller *ctrlr;
	struct nvme_completion cpl;
	struct mtx *mtx;

	ctrlr = cdev->si_drv1;

	switch (cmd) {
	case NVME_IDENTIFY_CONTROLLER:
#ifdef CHATHAM2
		/*
		 * Don't refresh data on Chatham, since Chatham returns
		 * garbage on IDENTIFY anyway.
		 */
		if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID) {
			memcpy(arg, &ctrlr->cdata, sizeof(ctrlr->cdata));
			break;
		}
#endif
		/* Refresh data before returning to user. */
		mtx = mtx_pool_find(mtxpool_sleep, &cpl);
		mtx_lock(mtx);
		nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
		    nvme_ctrlr_cb, &cpl);
		msleep(&cpl, mtx, PRIBIO, "nvme_ioctl", 0);
		mtx_unlock(mtx);
		if (cpl.sf_sc || cpl.sf_sct)
			return (ENXIO);
		memcpy(arg, &ctrlr->cdata, sizeof(ctrlr->cdata));
		break;
	default:
		return (ENOTTY);
	}

	return (0);
}

static struct cdevsw nvme_ctrlr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_ioctl =	nvme_ctrlr_ioctl
};

int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
	union cap_lo_register	cap_lo;
	union cap_hi_register	cap_hi;
	int			num_vectors, per_cpu_io_queues, status = 0;

	ctrlr->dev = dev;
	ctrlr->is_started = FALSE;

	status = nvme_ctrlr_allocate_bar(ctrlr);

	if (status != 0)
		return (status);

#ifdef CHATHAM2
	if (pci_get_devid(dev) == CHATHAM_PCI_ID) {
		status = nvme_ctrlr_allocate_chatham_bar(ctrlr);
		if (status != 0)
			return (status);
		nvme_ctrlr_setup_chatham(ctrlr);
	}
#endif

	/*
	 * Software emulators may set the doorbell stride to something
	 * other than zero, but this driver is not set up to handle that.
	 */
	cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
	if (cap_hi.bits.dstrd != 0)
		return (ENXIO);

	/* Get ready timeout value from controller, in units of 500ms. */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;

	per_cpu_io_queues = 1;
	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
	ctrlr->per_cpu_io_queues = per_cpu_io_queues ? TRUE : FALSE;

	if (ctrlr->per_cpu_io_queues)
		ctrlr->num_io_queues = mp_ncpus;
	else
		ctrlr->num_io_queues = 1;

	ctrlr->force_intx = 0;
	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

	ctrlr->msix_enabled = 1;

	if (ctrlr->force_intx) {
		ctrlr->msix_enabled = 0;
		goto intx;
	}

	/* One vector per IO queue, plus one vector for admin queue. */
	num_vectors = ctrlr->num_io_queues + 1;

	if (pci_msix_count(dev) < num_vectors) {
		ctrlr->msix_enabled = 0;
		goto intx;
	}

	if (pci_alloc_msix(dev, &num_vectors) != 0)
		ctrlr->msix_enabled = 0;

intx:

	if (!ctrlr->msix_enabled)
		nvme_ctrlr_configure_intx(ctrlr);

	nvme_ctrlr_construct_admin_qpair(ctrlr);

	status = nvme_ctrlr_construct_io_qpairs(ctrlr);

	if (status != 0)
		return (status);

	ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "nvme%d", device_get_unit(dev));

	if (ctrlr->cdev == NULL)
		return (ENXIO);

	ctrlr->cdev->si_drv1 = (void *)ctrlr;

	return (0);
}
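/*
 * Teardown runs in roughly the reverse order of construction: namespace
 * and controller cdevs, I/O queue pairs, outstanding AERs, the admin
 * queue pair, memory BARs, and finally the interrupt resources.
 */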
void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
	struct nvme_namespace *ns;
	int i;

	for (i = 0; i < NVME_MAX_NAMESPACES; i++) {
		ns = &ctrlr->ns[i];
		if (ns->cdev)
			destroy_dev(ns->cdev);
	}

	if (ctrlr->cdev)
		destroy_dev(ctrlr->cdev);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
	}

	free(ctrlr->ioq, M_NVME);

	/* Manually abort outstanding async event requests. */
	for (i = 0; i < ctrlr->num_aers; i++) {
		nvme_qpair_manual_abort_request(&ctrlr->adminq,
		    ctrlr->aer[i].req, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_SQ_DELETION, FALSE);
	}

	nvme_admin_qpair_destroy(&ctrlr->adminq);

	if (ctrlr->resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->resource_id, ctrlr->resource);
	}

	if (ctrlr->bar4_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
	}

#ifdef CHATHAM2
	if (ctrlr->chatham_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->chatham_resource_id, ctrlr->chatham_resource);
	}
#endif

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	if (ctrlr->msix_enabled)
		pci_release_msi(dev);
}

void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	nvme_qpair_submit_request(&ctrlr->adminq, req);
}

void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
	struct nvme_qpair *qpair;

	/*
	 * With per-CPU I/O queues, steer the request to the queue bound
	 * to the submitting CPU; otherwise everything funnels through
	 * queue 0.
	 */
	if (ctrlr->per_cpu_io_queues)
		qpair = &ctrlr->ioq[curcpu];
	else
		qpair = &ctrlr->ioq[0];

	nvme_qpair_submit_request(qpair, req);
}