/*-
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/nvme/nvme_private.h 331722 2018-03-29 02:50:57Z eadler $
 */

#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"

#define DEVICE2SOFTC(dev)	((struct nvme_controller *) device_get_softc(dev))

MALLOC_DECLARE(M_NVME);

#define IDT32_PCI_ID		0x80d0111d	/* 32 channel board */
#define IDT8_PCI_ID		0x80d2111d	/* 8 channel board */

/*
 * For commands requiring more than 2 PRP entries, one PRP will be
 * embedded in the command (prp1), and the rest of the PRP entries
 * will be in a list pointed to by the command (prp2).  This means
 * that the real max number of PRP entries we support is 32+1, which
 * results in a max xfer size of 32*PAGE_SIZE.
 */
#define NVME_MAX_PRP_LIST_ENTRIES	(NVME_MAX_XFER_SIZE / PAGE_SIZE)
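
/*
 * A sketch of the page arithmetic the comment above describes; the
 * helper below is hypothetical and is not used by the driver (the real
 * PRP lists are built in the bus_dma callback nvme_payload_map() in
 * nvme_qpair.c).
 */
#if 0
static uint32_t
nvme_prp_count_sketch(bus_addr_t payload_addr, uint32_t payload_size)
{
	/*
	 * prp1 may point anywhere within a page, but every subsequent
	 * entry must be page aligned, so count the pages the buffer
	 * touches rather than dividing the byte count.  A maximal
	 * NVME_MAX_XFER_SIZE transfer therefore needs up to
	 * NVME_MAX_PRP_LIST_ENTRIES + 1 entries: one in prp1 and the
	 * rest in the list pointed to by prp2.
	 */
	return (howmany((payload_addr & PAGE_MASK) + payload_size,
	    PAGE_SIZE));
}
#endif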

#define NVME_ADMIN_TRACKERS	(16)
#define NVME_ADMIN_ENTRIES	(128)
/* min and max are defined in the admin queue attributes section of the spec */
#define NVME_MIN_ADMIN_ENTRIES	(2)
#define NVME_MAX_ADMIN_ENTRIES	(4096)

/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 * queues, while NVME_IO_TRACKERS defines the maximum number of I/O that we
 * will allow outstanding on an I/O qpair at any time.  The only advantage in
 * having IO_ENTRIES > IO_TRACKERS is for debugging purposes - when dumping
 * the contents of the submission and completion queues, it will show a longer
 * history of data.
 */
#define NVME_IO_ENTRIES		(256)
#define NVME_IO_TRACKERS	(128)
#define NVME_MIN_IO_TRACKERS	(4)
#define NVME_MAX_IO_TRACKERS	(1024)

/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CAP.MQES
 * for each controller.
 */

#define NVME_INT_COAL_TIME	(0)	/* disabled */
#define NVME_INT_COAL_THRESHOLD (0)	/* 0-based */

#define NVME_MAX_NAMESPACES	(16)
#define NVME_MAX_CONSUMERS	(2)
#define NVME_MAX_ASYNC_EVENTS	(8)

#define NVME_DEFAULT_TIMEOUT_PERIOD	(30)	/* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD		(5)
#define NVME_MAX_TIMEOUT_PERIOD		(120)

#define NVME_DEFAULT_RETRY_COUNT	(4)

/* Maximum log page size to fetch for AERs. */
#define NVME_MAX_AER_LOG_SIZE		(4096)

/*
 * Define CACHE_LINE_SIZE here for older FreeBSD versions that do not define
 * it.
 */
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		(64)
#endif

/*
 * Use presence of the BIO_UNMAPPED flag to determine whether unmapped I/O
 * support and the bus_dmamap_load_bio API are available on the target
 * kernel.  This will ease porting back to earlier stable branches at a
 * later point.
 */
#ifdef BIO_UNMAPPED
#define NVME_UNMAPPED_BIO_SUPPORT
#endif

extern uma_zone_t	nvme_request_zone;
extern int32_t		nvme_retry_count;

struct nvme_completion_poll_status {

	struct nvme_completion	cpl;
	int			done;
};

#define NVME_REQUEST_VADDR	1
#define NVME_REQUEST_NULL	2 /* For requests with no payload. */
#define NVME_REQUEST_UIO	3
#ifdef NVME_UNMAPPED_BIO_SUPPORT
#define NVME_REQUEST_BIO	4
#endif
#define NVME_REQUEST_CCB	5

struct nvme_request {

	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
	union {
		void			*payload;
		struct bio		*bio;
	} u;
	uint32_t			type;
	uint32_t			payload_size;
	boolean_t			timeout;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	int32_t				retries;
	STAILQ_ENTRY(nvme_request)	stailq;
};

struct nvme_async_event_request {

	struct nvme_controller		*ctrlr;
	struct nvme_request		*req;
	struct nvme_completion		cpl;
	uint32_t			log_page_id;
	uint32_t			log_page_size;
	uint8_t				log_page_buffer[NVME_MAX_AER_LOG_SIZE];
};

struct nvme_tracker {

	TAILQ_ENTRY(nvme_tracker)	tailq;
	struct nvme_request		*req;
	struct nvme_qpair		*qpair;
	struct callout			timer;
	bus_dmamap_t			payload_dma_map;
	uint16_t			cid;

	uint64_t			*prp;
	bus_addr_t			prp_bus_addr;
};

struct nvme_qpair {

	struct nvme_controller	*ctrlr;
	uint32_t		id;
	uint32_t		phase;

	uint16_t		vector;
	int			rid;
	struct resource		*res;
	void			*tag;

	uint32_t		num_entries;
	uint32_t		num_trackers;
	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;

	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	bus_dma_tag_t		dma_tag;
	bus_dma_tag_t		dma_tag_payload;

	bus_dmamap_t		queuemem_map;
	uint64_t		cmd_bus_addr;
	uint64_t		cpl_bus_addr;

	TAILQ_HEAD(, nvme_tracker)	free_tr;
	TAILQ_HEAD(, nvme_tracker)	outstanding_tr;
	STAILQ_HEAD(, nvme_request)	queued_req;

	struct nvme_tracker	**act_tr;

	boolean_t		is_enabled;

	struct mtx		lock __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);
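
/*
 * A sketch of how the qpair "phase" field is consumed; compare
 * nvme_qpair_process_completions() in nvme_qpair.c.  The helper below
 * is hypothetical and assumes the nvme_completion status bitfield
 * layout from nvme.h.
 */
#if 0
static boolean_t
nvme_cq_entry_ready_sketch(struct nvme_qpair *qpair)
{
	struct nvme_completion *cpl = &qpair->cpl[qpair->cq_head];

	/* The controller toggles the phase bit each time it wraps the CQ. */
	if (cpl->status.p != qpair->phase)
		return (FALSE);		/* entry not yet written by hw */
	if (++qpair->cq_head == qpair->num_entries) {
		qpair->cq_head = 0;
		qpair->phase = !qpair->phase;	/* expect flipped bit next pass */
	}
	return (TRUE);
}
#endif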

struct nvme_namespace {

	struct nvme_controller		*ctrlr;
	struct nvme_namespace_data	data;
	uint32_t			id;
	uint32_t			flags;
	struct cdev			*cdev;
	void				*cons_cookie[NVME_MAX_CONSUMERS];
	uint32_t			stripesize;
	struct mtx			lock;
};

/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {

	device_t		dev;

	struct mtx		lock;

	uint32_t		ready_timeout_in_ms;
	uint32_t		quirks;
#define QUIRK_DELAY_B4_CHK_RDY	1	/* Can't touch MMIO on disable */

	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	int			resource_id;
	struct resource		*resource;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 * separate from the control registers which are in BAR 0/1.  These
	 * members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;

	uint32_t		msix_enabled;
	uint32_t		force_intx;
	uint32_t		enable_aborts;

	uint32_t		num_io_queues;
	uint32_t		num_cpus_per_ioq;
	uint32_t		max_hw_pend_io;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	struct task		reset_task;
	struct task		fail_req_task;
	struct taskqueue	*taskqueue;

	/* For shared legacy interrupt. */
	int			rid;
	struct resource		*res;
	void			*tag;

	bus_dma_tag_t		hw_desc_tag;
	bus_dmamap_t		hw_desc_map;

	/** maximum i/o size in bytes */
	uint32_t		max_xfer_size;

	/** minimum page size supported by this controller in bytes */
	uint32_t		min_page_size;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	/** timeout period in seconds */
	uint32_t		timeout_period;

	struct nvme_qpair	adminq;
	struct nvme_qpair	*ioq;

	struct nvme_registers	*regs;

	struct nvme_controller_data	cdata;
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];

	struct cdev		*cdev;

	/** bit mask of warning types currently enabled for async events */
	union nvme_critical_warning_state	async_event_config;

	uint32_t		num_aers;
	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];

	void			*cons_cookie[NVME_MAX_CONSUMERS];

	uint32_t		is_resetting;
	uint32_t		is_initialized;
	uint32_t		notification_sent;

	boolean_t		is_failed;
	STAILQ_HEAD(, nvme_request)	fail_req;
};

#define nvme_mmio_offsetof(reg)						\
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg)					\
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle,		\
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val)					\
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,		\
	    nvme_mmio_offsetof(reg), val)

#define nvme_mmio_write_8(sc, reg, val)					\
	do {								\
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	\
		    nvme_mmio_offsetof(reg), val & 0xFFFFFFFF);		\
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	\
		    nvme_mmio_offsetof(reg)+4,				\
		    (val & 0xFFFFFFFF00000000UL) >> 32);		\
	} while (0)

#if __FreeBSD_version < 800054
#define wmb()	__asm volatile("sfence" ::: "memory")
#define mb()	__asm volatile("mfence" ::: "memory")
#endif
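
/*
 * A sketch of the accessors above in use, loosely following
 * nvme_ctrlr_construct() and nvme_ctrlr_enable() in nvme_ctrlr.c.  The
 * register union names (cap_hi.raw, cap_hi.bits.to, asq) come from
 * nvme.h; the helper itself is hypothetical and the conversions are
 * illustrative.
 */
#if 0
static void
nvme_cap_sketch(struct nvme_controller *ctrlr)
{
	union nvme_controller_cap_hi_register cap_hi;

	cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
	/* CAP.TO counts 500ms units of worst-case time until CSTS.RDY. */
	ctrlr->ready_timeout_in_ms = cap_hi.bits.to * 500;

	/* 64-bit registers such as the admin SQ base use the split write. */
	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
}
#endif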

#define nvme_printf(ctrlr, fmt, args...)	\
    device_printf(ctrlr->dev, fmt, ##args)

void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
					   void *payload,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
					  uint32_t nsid, void *payload,
					  nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
						uint32_t microseconds,
						uint32_t threshold,
						nvme_cb_fn_t cb_fn,
						void *cb_arg);
void	nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
				      struct nvme_error_information_entry *payload,
				      uint32_t num_entries, /* 0 = max */
				      nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
						   uint32_t nsid,
						   struct nvme_health_information_page *payload,
						   nvme_cb_fn_t cb_fn,
						   void *cb_arg);
void	nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
					 struct nvme_firmware_page *payload,
					 nvme_cb_fn_t cb_fn,
					 void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que, uint16_t vector,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
					      union nvme_critical_warning_state state,
					      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
			     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);

void	nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);

int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_shutdown(struct nvme_controller *ctrlr);
int	nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr);
void	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
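
/*
 * A sketch of the synchronous-command pattern built from
 * nvme_completion_poll_cb() and struct nvme_completion_poll_status;
 * compare nvme_ctrlr_identify() in nvme_ctrlr.c.  The helper below is
 * hypothetical; nvme_completion_is_error() comes from nvme.h.
 */
#if 0
static int
nvme_identify_sketch(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;

	status.done = FALSE;
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);	/* the callback sets status.done */

	return (nvme_completion_is_error(&status.cpl) ? ENXIO : 0);
}
#endif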

/* ctrlr defined as void * to allow use with config_intrhook. */
void	nvme_ctrlr_start_config_hook(void *ctrlr_arg);
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
					struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
				     struct nvme_request *req);
void	nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
				       struct nvme_request *req);

int	nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
			     uint16_t vector, uint32_t num_entries,
			     uint32_t num_trackers,
			     struct nvme_controller *ctrlr);
void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
				  struct nvme_tracker *tr);
void	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
				  struct nvme_request *req);
void	nvme_qpair_reset(struct nvme_qpair *qpair);
void	nvme_qpair_fail(struct nvme_qpair *qpair);
void	nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
					   struct nvme_request *req,
					   uint32_t sct, uint32_t sc,
					   boolean_t print_on_error);

void	nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_enable(struct nvme_qpair *qpair);
void	nvme_io_qpair_disable(struct nvme_qpair *qpair);
void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int	nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
			  struct nvme_controller *ctrlr);
void	nvme_ns_destruct(struct nvme_namespace *ns);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void	nvme_dump_command(struct nvme_command *cmd);
void	nvme_dump_completion(struct nvme_completion *cpl);

static __inline void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	if (error != 0)
		printf("nvme_single_map err %d\n", error);
	*bus_addr = seg[0].ds_addr;
}

static __inline struct nvme_request *
_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = uma_zalloc(nvme_request_zone, M_NOWAIT | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->timeout = TRUE;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = payload;
		req->payload_size = payload_size;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL)
		req->type = NVME_REQUEST_NULL;
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
#ifdef NVME_UNMAPPED_BIO_SUPPORT
		req->type = NVME_REQUEST_BIO;
		req->u.bio = bio;
#else
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = bio->bio_data;
		req->payload_size = bio->bio_bcount;
#endif
	}
	return (req);
}
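
/*
 * A sketch of the allocate/submit pattern these inlines support;
 * compare nvme_ctrlr_cmd_identify_controller() in nvme_ctrlr_cmd.c.
 * Since _nvme_allocate_request() uses M_NOWAIT, a NULL return must be
 * handled.  The helper below is hypothetical.
 */
#if 0
static void
nvme_admin_cmd_sketch(struct nvme_controller *ctrlr, void *payload,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_vaddr(payload,
	    sizeof(struct nvme_controller_data), cb_fn, cb_arg);
	if (req == NULL)
		return;			/* M_NOWAIT allocation can fail */

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_IDENTIFY;
	cmd->cdw10 = 1;			/* CNS = 1: identify controller */

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
#endif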

static __inline struct nvme_request *
nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_CCB;
		req->u.payload = ccb;
	}

	return (req);
}

#define nvme_free_request(req)	uma_zfree(nvme_request_zone, req)

void	nvme_notify_async_consumers(struct nvme_controller *ctrlr,
				    const struct nvme_completion *async_cpl,
				    uint32_t log_page_id, void *log_page_buffer,
				    uint32_t log_page_size);
void	nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
void	nvme_notify_new_controller(struct nvme_controller *ctrlr);

void	nvme_ctrlr_intx_handler(void *arg);
void	nvme_ctrlr_poll(struct nvme_controller *ctrlr);

#endif /* __NVME_PRIVATE_H__ */