nvme_private.h revision 248755
/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/nvme/nvme_private.h 248755 2013-03-26 20:56:58Z jimharris $
 */

#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"

#define DEVICE2SOFTC(dev) ((struct nvme_controller *) device_get_softc(dev))

MALLOC_DECLARE(M_NVME);

#define CHATHAM2

#ifdef CHATHAM2
#define CHATHAM_PCI_ID          0x20118086
#define CHATHAM_CONTROL_BAR     0
#endif

#define IDT32_PCI_ID            0x80d0111d      /* 32 channel board */
#define IDT8_PCI_ID             0x80d2111d      /* 8 channel board */

#define NVME_MAX_PRP_LIST_ENTRIES       (32)

/*
 * For commands requiring more than 2 PRP entries, one PRP will be
 * embedded in the command (prp1), and the rest of the PRP entries
 * will be in a list pointed to by the command (prp2). This means
 * that the real max number of PRP entries we support is 32+1, which
 * results in a max xfer size of 32*PAGE_SIZE.
 */
#define NVME_MAX_XFER_SIZE      (NVME_MAX_PRP_LIST_ENTRIES * PAGE_SIZE)
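
/*
 * Worked example (assuming the common 4KB PAGE_SIZE): prp1 may carry a
 * page offset, so the 32+1 entries are only guaranteed to cover 32 full
 * pages, i.e. a max xfer size of 32 * 4096 = 131072 bytes (128KB).
 */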

#define NVME_ADMIN_TRACKERS     (16)
#define NVME_ADMIN_ENTRIES      (128)
/* min and max are defined in admin queue attributes section of spec */
#define NVME_MIN_ADMIN_ENTRIES  (2)
#define NVME_MAX_ADMIN_ENTRIES  (4096)

/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 * queues, while NVME_IO_TRACKERS defines the maximum number of I/Os that we
 * will allow to be outstanding on an I/O qpair at any time. The only advantage
 * in having IO_ENTRIES > IO_TRACKERS is for debugging purposes - when dumping
 * the contents of the submission and completion queues, it will show a longer
 * history of data.
 */
#define NVME_IO_ENTRIES         (256)
#define NVME_IO_TRACKERS        (128)
#define NVME_MIN_IO_TRACKERS    (16)
#define NVME_MAX_IO_TRACKERS    (1024)

/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CAP.MQES
 * for each controller.
 */

#define NVME_INT_COAL_TIME      (0)     /* disabled */
#define NVME_INT_COAL_THRESHOLD (0)     /* 0-based */

#define NVME_MAX_NAMESPACES     (16)
#define NVME_MAX_CONSUMERS      (2)
#define NVME_MAX_ASYNC_EVENTS   (8)

#define NVME_DEFAULT_TIMEOUT_PERIOD     (30)    /* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD         (5)
#define NVME_MAX_TIMEOUT_PERIOD         (120)

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE         (64)
#endif

extern uma_zone_t nvme_request_zone;

struct nvme_request {

        struct nvme_command             cmd;
        void                            *payload;
        uint32_t                        payload_size;
        boolean_t                       timeout;
        struct uio                      *uio;
        nvme_cb_fn_t                    cb_fn;
        void                            *cb_arg;
        STAILQ_ENTRY(nvme_request)      stailq;
};

struct nvme_async_event_request {

        struct nvme_controller          *ctrlr;
        struct nvme_request             *req;
};

struct nvme_tracker {

        TAILQ_ENTRY(nvme_tracker)       tailq;
        struct nvme_request             *req;
        struct nvme_qpair               *qpair;
        struct callout                  timer;
        bus_dmamap_t                    payload_dma_map;
        uint16_t                        cid;

        uint64_t                        prp[NVME_MAX_PRP_LIST_ENTRIES];
        bus_addr_t                      prp_bus_addr;
        bus_dmamap_t                    prp_dma_map;
};

struct nvme_qpair {

        struct nvme_controller  *ctrlr;
        uint32_t                id;
        uint32_t                phase;

        uint16_t                vector;
        int                     rid;
        struct resource         *res;
        void                    *tag;

        uint32_t                max_xfer_size;
        uint32_t                num_entries;
        uint32_t                num_trackers;
        uint32_t                sq_tdbl_off;
        uint32_t                cq_hdbl_off;

        uint32_t                sq_head;
        uint32_t                sq_tail;
        uint32_t                cq_head;

        int64_t                 num_cmds;
        int64_t                 num_intr_handler_calls;

        struct nvme_command     *cmd;
        struct nvme_completion  *cpl;

        bus_dma_tag_t           dma_tag;

        bus_dmamap_t            cmd_dma_map;
        uint64_t                cmd_bus_addr;

        bus_dmamap_t            cpl_dma_map;
        uint64_t                cpl_bus_addr;

        TAILQ_HEAD(, nvme_tracker)      free_tr;
        TAILQ_HEAD(, nvme_tracker)      outstanding_tr;
        STAILQ_HEAD(, nvme_request)     queued_req;

        struct nvme_tracker     **act_tr;

        boolean_t               is_enabled;

        struct mtx              lock __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);
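
/*
 * nvme_qpair_process_completions() relies on the "phase" field above: the
 * controller inverts the phase bit it writes on each pass through the
 * completion queue, so an entry at cpl[cq_head] whose phase tag matches
 * qpair->phase is newly posted. When cq_head wraps back to entry 0, the
 * expected phase is toggled.
 */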

struct nvme_namespace {

        struct nvme_controller          *ctrlr;
        struct nvme_namespace_data      data;
        uint16_t                        id;
        uint16_t                        flags;
        struct cdev                     *cdev;
        void                            *cons_cookie[NVME_MAX_CONSUMERS];
};

/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {

        device_t                dev;

        uint32_t                ready_timeout_in_ms;

        bus_space_tag_t         bus_tag;
        bus_space_handle_t      bus_handle;
        int                     resource_id;
        struct resource         *resource;

        /*
         * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
         * separate from the control registers which are in BAR 0/1. These
         * members track the mapping of BAR 4/5 for that reason.
         */
        int                     bar4_resource_id;
        struct resource         *bar4_resource;

#ifdef CHATHAM2
        bus_space_tag_t         chatham_bus_tag;
        bus_space_handle_t      chatham_bus_handle;
        int                     chatham_resource_id;
        struct resource         *chatham_resource;
#endif

        uint32_t                msix_enabled;
        uint32_t                force_intx;
        uint32_t                enable_aborts;

        uint32_t                num_io_queues;
        boolean_t               per_cpu_io_queues;

        /* Fields for tracking progress during controller initialization. */
        struct intr_config_hook config_hook;
        uint32_t                ns_identified;
        uint32_t                queues_created;
        uint32_t                num_start_attempts;
        struct task             reset_task;
        struct taskqueue        *taskqueue;

        /* For shared legacy interrupt. */
        int                     rid;
        struct resource         *res;
        void                    *tag;

        bus_dma_tag_t           hw_desc_tag;
        bus_dmamap_t            hw_desc_map;

        /** maximum i/o size in bytes */
        uint32_t                max_xfer_size;

        /** interrupt coalescing time period (in microseconds) */
        uint32_t                int_coal_time;

        /** interrupt coalescing threshold */
        uint32_t                int_coal_threshold;

        /** timeout period in seconds */
        uint32_t                timeout_period;

        struct nvme_qpair       adminq;
        struct nvme_qpair       *ioq;

        struct nvme_registers   *regs;

        struct nvme_controller_data     cdata;
        struct nvme_namespace           ns[NVME_MAX_NAMESPACES];

        struct cdev             *cdev;

        boolean_t               is_started;

        uint32_t                num_aers;
        struct nvme_async_event_request aer[NVME_MAX_ASYNC_EVENTS];

        void                    *cons_cookie[NVME_MAX_CONSUMERS];

        uint32_t                is_resetting;

#ifdef CHATHAM2
        uint64_t                chatham_size;
        uint64_t                chatham_lbas;
#endif
};

#define nvme_mmio_offsetof(reg)                                         \
        offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg)                                       \
        bus_space_read_4((sc)->bus_tag, (sc)->bus_handle,               \
            nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val)                                 \
        bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,              \
            nvme_mmio_offsetof(reg), val)

#define nvme_mmio_write_8(sc, reg, val)                                 \
        do {                                                            \
                bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,      \
                    nvme_mmio_offsetof(reg), (val) & 0xFFFFFFFF);       \
                bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,      \
                    nvme_mmio_offsetof(reg)+4,                          \
                    ((val) & 0xFFFFFFFF00000000UL) >> 32);              \
        } while (0)
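
/*
 * nvme_mmio_write_8 splits the 64-bit value into two 32-bit writes, low
 * dword first, rather than relying on bus_space_write_8(), which is not
 * available on all platforms. For example, programming the admin
 * submission queue base address (the asq register in struct
 * nvme_registers) might look like:
 *
 *      nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
 */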

#ifdef CHATHAM2
#define chatham_read_4(softc, reg)                                      \
        bus_space_read_4((softc)->chatham_bus_tag,                      \
            (softc)->chatham_bus_handle, reg)

#define chatham_write_8(sc, reg, val)                                   \
        do {                                                            \
                bus_space_write_4((sc)->chatham_bus_tag,                \
                    (sc)->chatham_bus_handle, reg, (val) & 0xffffffff); \
                bus_space_write_4((sc)->chatham_bus_tag,                \
                    (sc)->chatham_bus_handle, reg+4,                    \
                    ((val) & 0xFFFFFFFF00000000UL) >> 32);              \
        } while (0)

#endif /* CHATHAM2 */

#if __FreeBSD_version < 800054
#define wmb()   __asm volatile("sfence" ::: "memory")
#define mb()    __asm volatile("mfence" ::: "memory")
#endif

void    nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

void    nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
                                           void *payload,
                                           nvme_cb_fn_t cb_fn, void *cb_arg);
void    nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
                                          uint16_t nsid, void *payload,
                                          nvme_cb_fn_t cb_fn, void *cb_arg);
void    nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
                                                uint32_t microseconds,
                                                uint32_t threshold,
                                                nvme_cb_fn_t cb_fn,
                                                void *cb_arg);
void    nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
                                                   uint32_t nsid,
                                                   struct nvme_health_information_page *payload,
                                                   nvme_cb_fn_t cb_fn,
                                                   void *cb_arg);
void    nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
                                    struct nvme_qpair *io_que, uint16_t vector,
                                    nvme_cb_fn_t cb_fn, void *cb_arg);
void    nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
                                    struct nvme_qpair *io_que,
                                    nvme_cb_fn_t cb_fn, void *cb_arg);
void    nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
                                    struct nvme_qpair *io_que,
                                    nvme_cb_fn_t cb_fn, void *cb_arg);
void    nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
                                    struct nvme_qpair *io_que,
                                    nvme_cb_fn_t cb_fn, void *cb_arg);
void    nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
                                      uint32_t num_queues, nvme_cb_fn_t cb_fn,
                                      void *cb_arg);
void    nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
                                              union nvme_critical_warning_state state,
                                              nvme_cb_fn_t cb_fn, void *cb_arg);
void    nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
                             uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);

void    nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg,
                         int error);
void    nvme_payload_map_uio(void *arg, bus_dma_segment_t *seg, int nseg,
                             bus_size_t mapsize, int error);

int     nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void    nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
int     nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr);
void    nvme_ctrlr_reset(struct nvme_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void    nvme_ctrlr_start(void *ctrlr_arg);
void    nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
                                        struct nvme_request *req);
void    nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
                                     struct nvme_request *req);

void    nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
                             uint16_t vector, uint32_t num_entries,
                             uint32_t num_trackers, uint32_t max_xfer_size,
                             struct nvme_controller *ctrlr);
void    nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
                                  struct nvme_tracker *tr);
void    nvme_qpair_process_completions(struct nvme_qpair *qpair);
void    nvme_qpair_submit_request(struct nvme_qpair *qpair,
                                  struct nvme_request *req);

void    nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void    nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void    nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void    nvme_io_qpair_enable(struct nvme_qpair *qpair);
void    nvme_io_qpair_disable(struct nvme_qpair *qpair);
void    nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int     nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
                          struct nvme_controller *ctrlr);
void    nvme_ns_destruct(struct nvme_namespace *ns);

int     nvme_ns_physio(struct cdev *dev, struct uio *uio, int ioflag);

void    nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void    nvme_dump_command(struct nvme_command *cmd);
void    nvme_dump_completion(struct nvme_completion *cpl);

/* busdma callback that records the bus address of a single-segment mapping. */
static __inline void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
        uint64_t *bus_addr = (uint64_t *)arg;

        *bus_addr = seg[0].ds_addr;
}

static __inline struct nvme_request *
nvme_allocate_request(void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
                      void *cb_arg)
{
        struct nvme_request *req;

        req = uma_zalloc(nvme_request_zone, M_NOWAIT | M_ZERO);
        if (req == NULL)
                return (NULL);

        req->payload = payload;
        req->payload_size = payload_size;
        req->cb_fn = cb_fn;
        req->cb_arg = cb_arg;
        req->timeout = TRUE;

        return (req);
}
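
/*
 * Typical call pattern for the allocators here (a sketch only; the callback
 * name and its argument are hypothetical): allocate a request, then hand it
 * to the controller, which assigns a tracker and submits or queues it.
 *
 *      req = nvme_allocate_request(buf, len, my_io_done, my_arg);
 *      if (req == NULL)
 *              return (ENOMEM);
 *      nvme_ctrlr_submit_io_request(ctrlr, req);
 *
 * The callback is invoked from nvme_qpair_process_completions() when the
 * command completes.
 */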

static __inline struct nvme_request *
nvme_allocate_request_uio(struct uio *uio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
        struct nvme_request *req;

        req = uma_zalloc(nvme_request_zone, M_NOWAIT | M_ZERO);
        if (req == NULL)
                return (NULL);

        req->uio = uio;
        req->cb_fn = cb_fn;
        req->cb_arg = cb_arg;
        req->timeout = TRUE;

        return (req);
}

#define nvme_free_request(req) uma_zfree(nvme_request_zone, req)

void    nvme_notify_async_consumers(struct nvme_controller *ctrlr,
                                    const struct nvme_completion *async_cpl);

#endif /* __NVME_PRIVATE_H__ */