/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"

#define DEVICE2SOFTC(dev)	((struct nvme_controller *) device_get_softc(dev))

MALLOC_DECLARE(M_NVME);

#define IDT32_PCI_ID		0x80d0111d	/* 32 channel board */
#define IDT8_PCI_ID		0x80d2111d	/* 8 channel board */

#define NVME_ADMIN_TRACKERS	(16)
#define NVME_ADMIN_ENTRIES	(128)
/* min and max are defined in admin queue attributes section of spec */
#define NVME_MIN_ADMIN_ENTRIES	(2)
#define NVME_MAX_ADMIN_ENTRIES	(4096)

/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 * queues, while NVME_IO_TRACKERS defines the maximum number of I/O commands
 * that we will allow to be outstanding on an I/O qpair at any time.  The only
 * advantage in having IO_ENTRIES > IO_TRACKERS is for debugging purposes:
 * when dumping the contents of the submission and completion queues, a longer
 * history of commands is visible.  (The bounds these constants imply are
 * sketched as compile-time checks below.)
 */
#define NVME_IO_ENTRIES		(256)
#define NVME_IO_TRACKERS	(128)
#define NVME_MIN_IO_TRACKERS	(4)
#define NVME_MAX_IO_TRACKERS	(1024)

/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CAP.MQES
 * for each controller.
 */

#define NVME_INT_COAL_TIME	(0)	/* disabled */
#define NVME_INT_COAL_THRESHOLD	(0)	/* 0-based */

#define NVME_MAX_NAMESPACES	(16)
#define NVME_MAX_CONSUMERS	(2)
#define NVME_MAX_ASYNC_EVENTS	(8)

#define NVME_DEFAULT_TIMEOUT_PERIOD	(30)	/* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD		(5)
#define NVME_MAX_TIMEOUT_PERIOD		(120)

#define NVME_DEFAULT_RETRY_COUNT	(4)

/* Maximum log page size to fetch for AERs. */
#define NVME_MAX_AER_LOG_SIZE		(4096)
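
/*
 * A minimal sketch (not part of the original header): the invariants implied
 * by the comments above can be encoded with CTASSERT() from <sys/systm.h>.
 * Shown disabled, since the driver itself does not carry these checks.
 */
#if 0
CTASSERT(NVME_ADMIN_TRACKERS <= NVME_ADMIN_ENTRIES);
CTASSERT(NVME_ADMIN_ENTRIES >= NVME_MIN_ADMIN_ENTRIES);
CTASSERT(NVME_ADMIN_ENTRIES <= NVME_MAX_ADMIN_ENTRIES);
CTASSERT(NVME_IO_TRACKERS <= NVME_IO_ENTRIES);
CTASSERT(NVME_IO_TRACKERS >= NVME_MIN_IO_TRACKERS);
CTASSERT(NVME_IO_TRACKERS <= NVME_MAX_IO_TRACKERS);
#endif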

/*
 * Define CACHE_LINE_SIZE here for older FreeBSD versions that do not define
 * it.
 */
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		(64)
#endif

extern uma_zone_t	nvme_request_zone;
extern int32_t		nvme_retry_count;
extern bool		nvme_verbose_cmd_dump;

struct nvme_completion_poll_status {

	struct nvme_completion	cpl;
	int			done;
};

extern devclass_t nvme_devclass;

#define NVME_REQUEST_VADDR	1
#define NVME_REQUEST_NULL	2 /* For requests with no payload. */
#define NVME_REQUEST_UIO	3
#define NVME_REQUEST_BIO	4
#define NVME_REQUEST_CCB	5

struct nvme_request {

	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
	union {
		void			*payload;
		struct bio		*bio;
	} u;
	uint32_t			type;
	uint32_t			payload_size;
	bool				timeout;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	int32_t				retries;
	STAILQ_ENTRY(nvme_request)	stailq;
};

struct nvme_async_event_request {

	struct nvme_controller		*ctrlr;
	struct nvme_request		*req;
	struct nvme_completion		cpl;
	uint32_t			log_page_id;
	uint32_t			log_page_size;
	uint8_t				log_page_buffer[NVME_MAX_AER_LOG_SIZE];
};

struct nvme_tracker {

	TAILQ_ENTRY(nvme_tracker)	tailq;
	struct nvme_request		*req;
	struct nvme_qpair		*qpair;
	struct callout			timer;
	bus_dmamap_t			payload_dma_map;
	uint16_t			cid;

	uint64_t			*prp;
	bus_addr_t			prp_bus_addr;
};

struct nvme_qpair {

	struct nvme_controller	*ctrlr;
	uint32_t		id;
	int			domain;
	int			cpu;

	uint16_t		vector;
	int			rid;
	struct resource		*res;
	void			*tag;

	uint32_t		num_entries;
	uint32_t		num_trackers;
	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;

	uint32_t		phase;
	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;
	int64_t			num_retries;
	int64_t			num_failures;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	bus_dma_tag_t		dma_tag;
	bus_dma_tag_t		dma_tag_payload;

	bus_dmamap_t		queuemem_map;
	uint64_t		cmd_bus_addr;
	uint64_t		cpl_bus_addr;

	TAILQ_HEAD(, nvme_tracker)	free_tr;
	TAILQ_HEAD(, nvme_tracker)	outstanding_tr;
	STAILQ_HEAD(, nvme_request)	queued_req;

	struct nvme_tracker	**act_tr;

	bool			is_enabled;

	struct mtx		lock __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);
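
/*
 * Illustrative sketch (an assumption, not driver code): how a submission path
 * might dispatch on nvme_request.type to map the payload described by the
 * union above.  example_map_payload and example_payload_done are hypothetical
 * names; the real driver supplies its own bus_dma callback.
 */
#if 0
static bus_dmamap_callback_t example_payload_done;	/* hypothetical */

static int
example_map_payload(struct nvme_qpair *qpair, struct nvme_tracker *tr)
{
	struct nvme_request *req = tr->req;

	switch (req->type) {
	case NVME_REQUEST_NULL:
		return (0);		/* no payload to map */
	case NVME_REQUEST_VADDR:
		return (bus_dmamap_load(qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.payload, req->payload_size,
		    example_payload_done, tr, 0));
	case NVME_REQUEST_BIO:
		return (bus_dmamap_load_bio(qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.bio,
		    example_payload_done, tr, 0));
	default:
		return (EINVAL);
	}
}
#endif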

struct nvme_namespace {

	struct nvme_controller		*ctrlr;
	struct nvme_namespace_data	data;
	uint32_t			id;
	uint32_t			flags;
	struct cdev			*cdev;
	void				*cons_cookie[NVME_MAX_CONSUMERS];
	uint32_t			boundary;
	struct mtx			lock;
};

/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {

	device_t		dev;

	struct mtx		lock;
	int			domain;
	uint32_t		ready_timeout_in_ms;
	uint32_t		quirks;
#define QUIRK_DELAY_B4_CHK_RDY	1	/* Can't touch MMIO on disable */
#define QUIRK_DISABLE_TIMEOUT	2	/* Disable broken completion timeout feature */

	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	int			resource_id;
	struct resource		*resource;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 * separate from the control registers, which are in BAR 0/1.  These
	 * members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;

	uint32_t		msix_enabled;
	uint32_t		enable_aborts;

	uint32_t		num_io_queues;
	uint32_t		max_hw_pend_io;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	struct task		reset_task;
	struct task		fail_req_task;
	struct taskqueue	*taskqueue;

	/* For shared legacy interrupt. */
	int			rid;
	struct resource		*res;
	void			*tag;

	/** maximum i/o size in bytes */
	uint32_t		max_xfer_size;

	/** minimum page size supported by this controller in bytes */
	uint32_t		min_page_size;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	/** timeout period in seconds */
	uint32_t		timeout_period;

	/** doorbell stride */
	uint32_t		dstrd;

	struct nvme_qpair	adminq;
	struct nvme_qpair	*ioq;

	struct nvme_registers	*regs;

	struct nvme_controller_data	cdata;
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];

	struct cdev		*cdev;

	/** bit mask of event types currently enabled for async events */
	uint32_t		async_event_config;

	uint32_t		num_aers;
	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];

	void			*cons_cookie[NVME_MAX_CONSUMERS];

	uint32_t		is_resetting;
	uint32_t		is_initialized;
	uint32_t		notification_sent;

	bool			is_failed;
	STAILQ_HEAD(, nvme_request)	fail_req;

	/* Host Memory Buffer */
	int			hmb_nchunks;
	size_t			hmb_chunk;
	bus_dma_tag_t		hmb_tag;
	struct nvme_hmb_chunk {
		bus_dmamap_t		hmbc_map;
		void			*hmbc_vaddr;
		uint64_t		hmbc_paddr;
	} *hmb_chunks;
	bus_dma_tag_t		hmb_desc_tag;
	bus_dmamap_t		hmb_desc_map;
	struct nvme_hmb_desc	*hmb_desc_vaddr;
	uint64_t		hmb_desc_paddr;
};

#define nvme_mmio_offsetof(reg)						\
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg)					\
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle,		\
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val)					\
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,		\
	    nvme_mmio_offsetof(reg), val)

#define nvme_mmio_write_8(sc, reg, val)					\
	do {								\
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	\
		    nvme_mmio_offsetof(reg), val & 0xFFFFFFFF);		\
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	\
		    nvme_mmio_offsetof(reg)+4,				\
		    (val & 0xFFFFFFFF00000000ULL) >> 32);		\
	} while (0)
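
/*
 * Usage sketch (illustrative only): reading and writing controller registers
 * through the accessors above.  Register names resolve against struct
 * nvme_registers from nvme.h (e.g. vs, cc, asq); example_mmio is a
 * hypothetical name.
 */
#if 0
static void
example_mmio(struct nvme_controller *ctrlr, uint64_t asq_paddr)
{
	uint32_t vs, cc;

	vs = nvme_mmio_read_4(ctrlr, vs);	/* controller version */
	cc = nvme_mmio_read_4(ctrlr, cc);	/* controller configuration */
	/* 64-bit registers are written as two 4-byte accesses, low first. */
	nvme_mmio_write_8(ctrlr, asq, asq_paddr);
	(void)vs;
	(void)cc;
}
#endif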

#define nvme_printf(ctrlr, fmt, args...)	\
    device_printf(ctrlr->dev, fmt, ##args)

void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
					   void *payload,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
					  uint32_t nsid, void *payload,
					  nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
						uint32_t microseconds,
						uint32_t threshold,
						nvme_cb_fn_t cb_fn,
						void *cb_arg);
void	nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
				      struct nvme_error_information_entry *payload,
				      uint32_t num_entries, /* 0 = max */
				      nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
						   uint32_t nsid,
						   struct nvme_health_information_page *payload,
						   nvme_cb_fn_t cb_fn,
						   void *cb_arg);
void	nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
					 struct nvme_firmware_page *payload,
					 nvme_cb_fn_t cb_fn,
					 void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
					      uint32_t state,
					      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
			     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);
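
/*
 * Illustrative sketch of the asynchronous calling convention above: the
 * caller passes an nvme_cb_fn_t that runs from completion context.  The
 * example_* names are hypothetical; NVME_GLOBAL_NAMESPACE_TAG and the health
 * page layout are assumed to come from nvme.h.
 */
#if 0
static void
example_health_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_health_information_page *hip = arg;

	if (nvme_completion_is_error(cpl))
		return;
	printf("composite temperature: %u\n", hip->temperature);
}

static void
example_request_health(struct nvme_controller *ctrlr,
    struct nvme_health_information_page *hip)
{
	nvme_ctrlr_cmd_get_health_information_page(ctrlr,
	    NVME_GLOBAL_NAMESPACE_TAG, hip, example_health_cb, hip);
}
#endif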

void	nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);

int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_shutdown(struct nvme_controller *ctrlr);
int	nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr);
void	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void	nvme_ctrlr_start_config_hook(void *ctrlr_arg);
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
					struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
				     struct nvme_request *req);
void	nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
				       struct nvme_request *req);

int	nvme_qpair_construct(struct nvme_qpair *qpair,
			     uint32_t num_entries, uint32_t num_trackers,
			     struct nvme_controller *ctrlr);
void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
				  struct nvme_tracker *tr);
bool	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
				  struct nvme_request *req);
void	nvme_qpair_reset(struct nvme_qpair *qpair);
void	nvme_qpair_fail(struct nvme_qpair *qpair);
void	nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
					   struct nvme_request *req,
					   uint32_t sct, uint32_t sc);

void	nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_enable(struct nvme_qpair *qpair);
void	nvme_io_qpair_disable(struct nvme_qpair *qpair);
void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int	nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
			  struct nvme_controller *ctrlr);
void	nvme_ns_destruct(struct nvme_namespace *ns);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void	nvme_dump_command(struct nvme_command *cmd);
void	nvme_dump_completion(struct nvme_completion *cpl);

int	nvme_attach(device_t dev);
int	nvme_shutdown(device_t dev);
int	nvme_detach(device_t dev);

/*
 * Wait for a command to complete using the nvme_completion_poll_cb.  Used in
 * limited contexts where the caller knows it's OK to block briefly while the
 * command runs.  The ISR will run the callback, which will set status->done
 * to true, usually within microseconds.  If not, then after one second the
 * timeout handler should reset the controller and abort all outstanding
 * requests, including this polled one.  If that still hasn't happened after
 * ten seconds, then something is wrong with the driver, and panic is the
 * only way to recover.
 */
static __inline
void
nvme_completion_poll(struct nvme_completion_poll_status *status)
{
	int sanity = hz * 10;

	while (!atomic_load_acq_int(&status->done) && --sanity > 0)
		pause("nvme", 1);
	if (sanity <= 0)
		panic("NVME polled command failed to complete within 10s.");
}
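
/*
 * Usage sketch (illustrative): the synchronous pattern described above,
 * pairing nvme_completion_poll_cb with nvme_completion_poll.  This mirrors
 * how the driver issues admin commands during attach; the function name is
 * hypothetical and error handling is minimal.
 */
#if 0
static void
example_identify_sync(struct nvme_controller *ctrlr,
    struct nvme_controller_data *cdata)
{
	struct nvme_completion_poll_status status;

	status.done = 0;
	nvme_ctrlr_cmd_identify_controller(ctrlr, cdata,
	    nvme_completion_poll_cb, &status);
	nvme_completion_poll(&status);
	if (nvme_completion_is_error(&status.cpl))
		nvme_printf(ctrlr, "identify controller failed\n");
}
#endif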

static __inline void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg));
	if (error != 0)
		printf("nvme_single_map err %d\n", error);
	*bus_addr = seg[0].ds_addr;
}

static __inline struct nvme_request *
_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = uma_zalloc(nvme_request_zone, M_NOWAIT | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->timeout = true;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = payload;
		req->payload_size = payload_size;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL)
		req->type = NVME_REQUEST_NULL;
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_BIO;
		req->u.bio = bio;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_CCB;
		req->u.payload = ccb;
	}

	return (req);
}

#define nvme_free_request(req)	uma_zfree(nvme_request_zone, req)

void	nvme_notify_async_consumers(struct nvme_controller *ctrlr,
				    const struct nvme_completion *async_cpl,
				    uint32_t log_page_id, void *log_page_buffer,
				    uint32_t log_page_size);
void	nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
void	nvme_notify_new_controller(struct nvme_controller *ctrlr);
void	nvme_notify_ns(struct nvme_controller *ctrlr, int nsid);

void	nvme_ctrlr_intx_handler(void *arg);
void	nvme_ctrlr_poll(struct nvme_controller *ctrlr);

int	nvme_ctrlr_suspend(struct nvme_controller *ctrlr);
int	nvme_ctrlr_resume(struct nvme_controller *ctrlr);

#endif /* __NVME_PRIVATE_H__ */