nvme_qpair.c revision 346249
1/*- 2 * Copyright (C) 2012-2014 Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: stable/11/sys/dev/nvme/nvme_qpair.c 346249 2019-04-15 17:54:40Z mav $"); 29 30#include <sys/param.h> 31#include <sys/bus.h> 32 33#include <dev/pci/pcivar.h> 34 35#include "nvme_private.h" 36 37static void _nvme_qpair_submit_request(struct nvme_qpair *qpair, 38 struct nvme_request *req); 39static void nvme_qpair_destroy(struct nvme_qpair *qpair); 40 41struct nvme_opcode_string { 42 43 uint16_t opc; 44 const char * str; 45}; 46 47static struct nvme_opcode_string admin_opcode[] = { 48 { NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" }, 49 { NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" }, 50 { NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" }, 51 { NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" }, 52 { NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" }, 53 { NVME_OPC_IDENTIFY, "IDENTIFY" }, 54 { NVME_OPC_ABORT, "ABORT" }, 55 { NVME_OPC_SET_FEATURES, "SET FEATURES" }, 56 { NVME_OPC_GET_FEATURES, "GET FEATURES" }, 57 { NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" }, 58 { NVME_OPC_FIRMWARE_ACTIVATE, "FIRMWARE ACTIVATE" }, 59 { NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" }, 60 { NVME_OPC_DEVICE_SELF_TEST, "DEVICE SELF-TEST" }, 61 { NVME_OPC_NAMESPACE_ATTACHMENT, "NAMESPACE ATTACHMENT" }, 62 { NVME_OPC_KEEP_ALIVE, "KEEP ALIVE" }, 63 { NVME_OPC_DIRECTIVE_SEND, "DIRECTIVE SEND" }, 64 { NVME_OPC_DIRECTIVE_RECEIVE, "DIRECTIVE RECEIVE" }, 65 { NVME_OPC_VIRTUALIZATION_MANAGEMENT, "VIRTUALIZATION MANAGEMENT" }, 66 { NVME_OPC_NVME_MI_SEND, "NVME-MI SEND" }, 67 { NVME_OPC_NVME_MI_RECEIVE, "NVME-MI RECEIVE" }, 68 { NVME_OPC_DOORBELL_BUFFER_CONFIG, "DOORBELL BUFFER CONFIG" }, 69 { NVME_OPC_FORMAT_NVM, "FORMAT NVM" }, 70 { NVME_OPC_SECURITY_SEND, "SECURITY SEND" }, 71 { NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" }, 72 { NVME_OPC_SANITIZE, "SANITIZE" }, 73 { 0xFFFF, "ADMIN COMMAND" } 74}; 75 76static struct nvme_opcode_string io_opcode[] = { 77 { NVME_OPC_FLUSH, "FLUSH" }, 78 { NVME_OPC_WRITE, "WRITE" }, 79 { NVME_OPC_READ, "READ" }, 80 { NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" }, 81 { NVME_OPC_COMPARE, "COMPARE" }, 82 { NVME_OPC_WRITE_ZEROES, "WRITE ZEROES" }, 83 { NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" }, 84 { NVME_OPC_RESERVATION_REGISTER, "RESERVATION REGISTER" }, 85 { NVME_OPC_RESERVATION_REPORT, "RESERVATION REPORT" }, 86 { NVME_OPC_RESERVATION_ACQUIRE, "RESERVATION ACQUIRE" }, 87 { NVME_OPC_RESERVATION_RELEASE, "RESERVATION RELEASE" }, 88 { 0xFFFF, "IO COMMAND" } 89}; 90 91static const char * 92get_admin_opcode_string(uint16_t opc) 93{ 94 struct nvme_opcode_string *entry; 95 96 entry = admin_opcode; 97 98 while (entry->opc != 0xFFFF) { 99 if (entry->opc == opc) 100 return (entry->str); 101 entry++; 102 } 103 return (entry->str); 104} 105 106static const char * 107get_io_opcode_string(uint16_t opc) 108{ 109 struct nvme_opcode_string *entry; 110 111 entry = io_opcode; 112 113 while (entry->opc != 0xFFFF) { 114 if (entry->opc == opc) 115 return (entry->str); 116 entry++; 117 } 118 return (entry->str); 119} 120 121 122static void 123nvme_admin_qpair_print_command(struct nvme_qpair *qpair, 124 struct nvme_command *cmd) 125{ 126 127 nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x " 128 "cdw10:%08x cdw11:%08x\n", 129 get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid, 130 cmd->nsid, cmd->cdw10, cmd->cdw11); 131} 132 133static void 134nvme_io_qpair_print_command(struct nvme_qpair *qpair, 135 struct nvme_command *cmd) 136{ 137 138 switch (cmd->opc) { 139 case NVME_OPC_WRITE: 140 case NVME_OPC_READ: 141 case NVME_OPC_WRITE_UNCORRECTABLE: 142 case NVME_OPC_COMPARE: 143 case NVME_OPC_WRITE_ZEROES: 144 nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d " 145 "lba:%llu len:%d\n", 146 get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, 147 cmd->nsid, 148 ((unsigned long long)cmd->cdw11 << 32) + cmd->cdw10, 149 (cmd->cdw12 & 0xFFFF) + 1); 150 break; 151 case NVME_OPC_FLUSH: 152 case NVME_OPC_DATASET_MANAGEMENT: 153 case NVME_OPC_RESERVATION_REGISTER: 154 case NVME_OPC_RESERVATION_REPORT: 155 case NVME_OPC_RESERVATION_ACQUIRE: 156 case NVME_OPC_RESERVATION_RELEASE: 157 nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n", 158 get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, 159 cmd->nsid); 160 break; 161 default: 162 nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n", 163 get_io_opcode_string(cmd->opc), cmd->opc, qpair->id, 164 cmd->cid, cmd->nsid); 165 break; 166 } 167} 168 169static void 170nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd) 171{ 172 if (qpair->id == 0) 173 nvme_admin_qpair_print_command(qpair, cmd); 174 else 175 nvme_io_qpair_print_command(qpair, cmd); 176} 177 178struct nvme_status_string { 179 180 uint16_t sc; 181 const char * str; 182}; 183 184static struct nvme_status_string generic_status[] = { 185 { NVME_SC_SUCCESS, "SUCCESS" }, 186 { NVME_SC_INVALID_OPCODE, "INVALID OPCODE" }, 187 { NVME_SC_INVALID_FIELD, "INVALID_FIELD" }, 188 { NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" }, 189 { NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" }, 190 { NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" }, 191 { NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" }, 192 { NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" }, 193 { NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" }, 194 { NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" }, 195 { NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" }, 196 { NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" }, 197 { NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" }, 198 { NVME_SC_INVALID_SGL_SEGMENT_DESCR, "INVALID SGL SEGMENT DESCRIPTOR" }, 199 { NVME_SC_INVALID_NUMBER_OF_SGL_DESCR, "INVALID NUMBER OF SGL DESCRIPTORS" }, 200 { NVME_SC_DATA_SGL_LENGTH_INVALID, "DATA SGL LENGTH INVALID" }, 201 { NVME_SC_METADATA_SGL_LENGTH_INVALID, "METADATA SGL LENGTH INVALID" }, 202 { NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID, "SGL DESCRIPTOR TYPE INVALID" }, 203 { NVME_SC_INVALID_USE_OF_CMB, "INVALID USE OF CONTROLLER MEMORY BUFFER" }, 204 { NVME_SC_PRP_OFFET_INVALID, "PRP OFFET INVALID" }, 205 { NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED, "ATOMIC WRITE UNIT EXCEEDED" }, 206 { NVME_SC_OPERATION_DENIED, "OPERATION DENIED" }, 207 { NVME_SC_SGL_OFFSET_INVALID, "SGL OFFSET INVALID" }, 208 { NVME_SC_HOST_ID_INCONSISTENT_FORMAT, "HOST IDENTIFIER INCONSISTENT FORMAT" }, 209 { NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED, "KEEP ALIVE TIMEOUT EXPIRED" }, 210 { NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID, "KEEP ALIVE TIMEOUT INVALID" }, 211 { NVME_SC_ABORTED_DUE_TO_PREEMPT, "COMMAND ABORTED DUE TO PREEMPT AND ABORT" }, 212 { NVME_SC_SANITIZE_FAILED, "SANITIZE FAILED" }, 213 { NVME_SC_SANITIZE_IN_PROGRESS, "SANITIZE IN PROGRESS" }, 214 { NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID, "SGL_DATA_BLOCK_GRANULARITY_INVALID" }, 215 { NVME_SC_NOT_SUPPORTED_IN_CMB, "COMMAND NOT SUPPORTED FOR QUEUE IN CMB" }, 216 217 { NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" }, 218 { NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" }, 219 { NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" }, 220 { NVME_SC_RESERVATION_CONFLICT, "RESERVATION CONFLICT" }, 221 { NVME_SC_FORMAT_IN_PROGRESS, "FORMAT IN PROGRESS" }, 222 { 0xFFFF, "GENERIC" } 223}; 224 225static struct nvme_status_string command_specific_status[] = { 226 { NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" }, 227 { NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" }, 228 { NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" }, 229 { NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" }, 230 { NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" }, 231 { NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" }, 232 { NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" }, 233 { NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" }, 234 { NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" }, 235 { NVME_SC_INVALID_FORMAT, "INVALID FORMAT" }, 236 { NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" }, 237 { NVME_SC_INVALID_QUEUE_DELETION, "INVALID QUEUE DELETION" }, 238 { NVME_SC_FEATURE_NOT_SAVEABLE, "FEATURE IDENTIFIER NOT SAVEABLE" }, 239 { NVME_SC_FEATURE_NOT_CHANGEABLE, "FEATURE NOT CHANGEABLE" }, 240 { NVME_SC_FEATURE_NOT_NS_SPECIFIC, "FEATURE NOT NAMESPACE SPECIFIC" }, 241 { NVME_SC_FW_ACT_REQUIRES_NVMS_RESET, "FIRMWARE ACTIVATION REQUIRES NVM SUBSYSTEM RESET" }, 242 { NVME_SC_FW_ACT_REQUIRES_RESET, "FIRMWARE ACTIVATION REQUIRES RESET" }, 243 { NVME_SC_FW_ACT_REQUIRES_TIME, "FIRMWARE ACTIVATION REQUIRES MAXIMUM TIME VIOLATION" }, 244 { NVME_SC_FW_ACT_PROHIBITED, "FIRMWARE ACTIVATION PROHIBITED" }, 245 { NVME_SC_OVERLAPPING_RANGE, "OVERLAPPING RANGE" }, 246 { NVME_SC_NS_INSUFFICIENT_CAPACITY, "NAMESPACE INSUFFICIENT CAPACITY" }, 247 { NVME_SC_NS_ID_UNAVAILABLE, "NAMESPACE IDENTIFIER UNAVAILABLE" }, 248 { NVME_SC_NS_ALREADY_ATTACHED, "NAMESPACE ALREADY ATTACHED" }, 249 { NVME_SC_NS_IS_PRIVATE, "NAMESPACE IS PRIVATE" }, 250 { NVME_SC_NS_NOT_ATTACHED, "NS NOT ATTACHED" }, 251 { NVME_SC_THIN_PROV_NOT_SUPPORTED, "THIN PROVISIONING NOT SUPPORTED" }, 252 { NVME_SC_CTRLR_LIST_INVALID, "CONTROLLER LIST INVALID" }, 253 { NVME_SC_SELT_TEST_IN_PROGRESS, "DEVICE SELT-TEST IN PROGRESS" }, 254 { NVME_SC_BOOT_PART_WRITE_PROHIB, "BOOT PARTITION WRITE PROHIBITED" }, 255 { NVME_SC_INVALID_CTRLR_ID, "INVALID CONTROLLER IDENTIFIER" }, 256 { NVME_SC_INVALID_SEC_CTRLR_STATE, "INVALID SECONDARY CONTROLLER STATE" }, 257 { NVME_SC_INVALID_NUM_OF_CTRLR_RESRC, "INVALID NUMBER OF CONTROLLER RESOURCES" }, 258 { NVME_SC_INVALID_RESOURCE_ID, "INVALID RESOURCE IDENTIFIER" }, 259 260 { NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" }, 261 { NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" }, 262 { NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" }, 263 { 0xFFFF, "COMMAND SPECIFIC" } 264}; 265 266static struct nvme_status_string media_error_status[] = { 267 { NVME_SC_WRITE_FAULTS, "WRITE FAULTS" }, 268 { NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" }, 269 { NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" }, 270 { NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" }, 271 { NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" }, 272 { NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" }, 273 { NVME_SC_ACCESS_DENIED, "ACCESS DENIED" }, 274 { NVME_SC_DEALLOCATED_OR_UNWRITTEN, "DEALLOCATED OR UNWRITTEN LOGICAL BLOCK" }, 275 { 0xFFFF, "MEDIA ERROR" } 276}; 277 278static const char * 279get_status_string(uint16_t sct, uint16_t sc) 280{ 281 struct nvme_status_string *entry; 282 283 switch (sct) { 284 case NVME_SCT_GENERIC: 285 entry = generic_status; 286 break; 287 case NVME_SCT_COMMAND_SPECIFIC: 288 entry = command_specific_status; 289 break; 290 case NVME_SCT_MEDIA_ERROR: 291 entry = media_error_status; 292 break; 293 case NVME_SCT_VENDOR_SPECIFIC: 294 return ("VENDOR SPECIFIC"); 295 default: 296 return ("RESERVED"); 297 } 298 299 while (entry->sc != 0xFFFF) { 300 if (entry->sc == sc) 301 return (entry->str); 302 entry++; 303 } 304 return (entry->str); 305} 306 307static void 308nvme_qpair_print_completion(struct nvme_qpair *qpair, 309 struct nvme_completion *cpl) 310{ 311 nvme_printf(qpair->ctrlr, "%s (%02x/%02x) sqid:%d cid:%d cdw0:%x\n", 312 get_status_string(cpl->status.sct, cpl->status.sc), 313 cpl->status.sct, cpl->status.sc, cpl->sqid, cpl->cid, cpl->cdw0); 314} 315 316static boolean_t 317nvme_completion_is_retry(const struct nvme_completion *cpl) 318{ 319 /* 320 * TODO: spec is not clear how commands that are aborted due 321 * to TLER will be marked. So for now, it seems 322 * NAMESPACE_NOT_READY is the only case where we should 323 * look at the DNR bit. Requests failed with ABORTED_BY_REQUEST 324 * set the DNR bit correctly since the driver controls that. 325 */ 326 switch (cpl->status.sct) { 327 case NVME_SCT_GENERIC: 328 switch (cpl->status.sc) { 329 case NVME_SC_ABORTED_BY_REQUEST: 330 case NVME_SC_NAMESPACE_NOT_READY: 331 if (cpl->status.dnr) 332 return (0); 333 else 334 return (1); 335 case NVME_SC_INVALID_OPCODE: 336 case NVME_SC_INVALID_FIELD: 337 case NVME_SC_COMMAND_ID_CONFLICT: 338 case NVME_SC_DATA_TRANSFER_ERROR: 339 case NVME_SC_ABORTED_POWER_LOSS: 340 case NVME_SC_INTERNAL_DEVICE_ERROR: 341 case NVME_SC_ABORTED_SQ_DELETION: 342 case NVME_SC_ABORTED_FAILED_FUSED: 343 case NVME_SC_ABORTED_MISSING_FUSED: 344 case NVME_SC_INVALID_NAMESPACE_OR_FORMAT: 345 case NVME_SC_COMMAND_SEQUENCE_ERROR: 346 case NVME_SC_LBA_OUT_OF_RANGE: 347 case NVME_SC_CAPACITY_EXCEEDED: 348 default: 349 return (0); 350 } 351 case NVME_SCT_COMMAND_SPECIFIC: 352 case NVME_SCT_MEDIA_ERROR: 353 case NVME_SCT_VENDOR_SPECIFIC: 354 default: 355 return (0); 356 } 357} 358 359static void 360nvme_qpair_complete_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr, 361 struct nvme_completion *cpl, boolean_t print_on_error) 362{ 363 struct nvme_request *req; 364 boolean_t retry, error; 365 366 req = tr->req; 367 error = nvme_completion_is_error(cpl); 368 retry = error && nvme_completion_is_retry(cpl) && 369 req->retries < nvme_retry_count; 370 371 if (error && print_on_error) { 372 nvme_qpair_print_command(qpair, &req->cmd); 373 nvme_qpair_print_completion(qpair, cpl); 374 } 375 376 qpair->act_tr[cpl->cid] = NULL; 377 378 KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n")); 379 380 if (req->cb_fn && !retry) 381 req->cb_fn(req->cb_arg, cpl); 382 383 mtx_lock(&qpair->lock); 384 callout_stop(&tr->timer); 385 386 if (retry) { 387 req->retries++; 388 nvme_qpair_submit_tracker(qpair, tr); 389 } else { 390 if (req->type != NVME_REQUEST_NULL) { 391 bus_dmamap_sync(qpair->dma_tag_payload, 392 tr->payload_dma_map, 393 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 394 bus_dmamap_unload(qpair->dma_tag_payload, 395 tr->payload_dma_map); 396 } 397 398 nvme_free_request(req); 399 tr->req = NULL; 400 401 TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq); 402 TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq); 403 404 /* 405 * If the controller is in the middle of resetting, don't 406 * try to submit queued requests here - let the reset logic 407 * handle that instead. 408 */ 409 if (!STAILQ_EMPTY(&qpair->queued_req) && 410 !qpair->ctrlr->is_resetting) { 411 req = STAILQ_FIRST(&qpair->queued_req); 412 STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq); 413 _nvme_qpair_submit_request(qpair, req); 414 } 415 } 416 417 mtx_unlock(&qpair->lock); 418} 419 420static void 421nvme_qpair_manual_complete_tracker(struct nvme_qpair *qpair, 422 struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr, 423 boolean_t print_on_error) 424{ 425 struct nvme_completion cpl; 426 427 memset(&cpl, 0, sizeof(cpl)); 428 cpl.sqid = qpair->id; 429 cpl.cid = tr->cid; 430 cpl.status.sct = sct; 431 cpl.status.sc = sc; 432 cpl.status.dnr = dnr; 433 nvme_qpair_complete_tracker(qpair, tr, &cpl, print_on_error); 434} 435 436void 437nvme_qpair_manual_complete_request(struct nvme_qpair *qpair, 438 struct nvme_request *req, uint32_t sct, uint32_t sc, 439 boolean_t print_on_error) 440{ 441 struct nvme_completion cpl; 442 boolean_t error; 443 444 memset(&cpl, 0, sizeof(cpl)); 445 cpl.sqid = qpair->id; 446 cpl.status.sct = sct; 447 cpl.status.sc = sc; 448 449 error = nvme_completion_is_error(&cpl); 450 451 if (error && print_on_error) { 452 nvme_qpair_print_command(qpair, &req->cmd); 453 nvme_qpair_print_completion(qpair, &cpl); 454 } 455 456 if (req->cb_fn) 457 req->cb_fn(req->cb_arg, &cpl); 458 459 nvme_free_request(req); 460} 461 462bool 463nvme_qpair_process_completions(struct nvme_qpair *qpair) 464{ 465 struct nvme_tracker *tr; 466 struct nvme_completion *cpl; 467 int done = 0; 468 469 qpair->num_intr_handler_calls++; 470 471 if (!qpair->is_enabled) 472 /* 473 * qpair is not enabled, likely because a controller reset is 474 * is in progress. Ignore the interrupt - any I/O that was 475 * associated with this interrupt will get retried when the 476 * reset is complete. 477 */ 478 return (false); 479 480 bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map, 481 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 482 while (1) { 483 cpl = &qpair->cpl[qpair->cq_head]; 484 485 if (cpl->status.p != qpair->phase) 486 break; 487 488 tr = qpair->act_tr[cpl->cid]; 489 490 if (tr != NULL) { 491 nvme_qpair_complete_tracker(qpair, tr, cpl, TRUE); 492 qpair->sq_head = cpl->sqhd; 493 done++; 494 } else { 495 nvme_printf(qpair->ctrlr, 496 "cpl does not map to outstanding cmd\n"); 497 nvme_dump_completion(cpl); 498 KASSERT(0, ("received completion for unknown cmd\n")); 499 } 500 501 if (++qpair->cq_head == qpair->num_entries) { 502 qpair->cq_head = 0; 503 qpair->phase = !qpair->phase; 504 } 505 506 nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].cq_hdbl, 507 qpair->cq_head); 508 } 509 return (done != 0); 510} 511 512static void 513nvme_qpair_msix_handler(void *arg) 514{ 515 struct nvme_qpair *qpair = arg; 516 517 nvme_qpair_process_completions(qpair); 518} 519 520int 521nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id, 522 uint16_t vector, uint32_t num_entries, uint32_t num_trackers, 523 struct nvme_controller *ctrlr) 524{ 525 struct nvme_tracker *tr; 526 size_t cmdsz, cplsz, prpsz, allocsz, prpmemsz; 527 uint64_t queuemem_phys, prpmem_phys, list_phys; 528 uint8_t *queuemem, *prpmem, *prp_list; 529 int i, err; 530 531 qpair->id = id; 532 qpair->vector = vector; 533 qpair->num_entries = num_entries; 534 qpair->num_trackers = num_trackers; 535 qpair->ctrlr = ctrlr; 536 537 if (ctrlr->msix_enabled) { 538 539 /* 540 * MSI-X vector resource IDs start at 1, so we add one to 541 * the queue's vector to get the corresponding rid to use. 542 */ 543 qpair->rid = vector + 1; 544 545 qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ, 546 &qpair->rid, RF_ACTIVE); 547 bus_setup_intr(ctrlr->dev, qpair->res, 548 INTR_TYPE_MISC | INTR_MPSAFE, NULL, 549 nvme_qpair_msix_handler, qpair, &qpair->tag); 550 if (id == 0) { 551 bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag, 552 "admin"); 553 } else { 554 bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag, 555 "io%d", id - 1); 556 } 557 } 558 559 mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF); 560 561 /* Note: NVMe PRP format is restricted to 4-byte alignment. */ 562 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 563 4, PAGE_SIZE, BUS_SPACE_MAXADDR, 564 BUS_SPACE_MAXADDR, NULL, NULL, NVME_MAX_XFER_SIZE, 565 (NVME_MAX_XFER_SIZE/PAGE_SIZE)+1, PAGE_SIZE, 0, 566 NULL, NULL, &qpair->dma_tag_payload); 567 if (err != 0) { 568 nvme_printf(ctrlr, "payload tag create failed %d\n", err); 569 goto out; 570 } 571 572 /* 573 * Each component must be page aligned, and individual PRP lists 574 * cannot cross a page boundary. 575 */ 576 cmdsz = qpair->num_entries * sizeof(struct nvme_command); 577 cmdsz = roundup2(cmdsz, PAGE_SIZE); 578 cplsz = qpair->num_entries * sizeof(struct nvme_completion); 579 cplsz = roundup2(cplsz, PAGE_SIZE); 580 prpsz = sizeof(uint64_t) * NVME_MAX_PRP_LIST_ENTRIES;; 581 prpmemsz = qpair->num_trackers * prpsz; 582 allocsz = cmdsz + cplsz + prpmemsz; 583 584 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 585 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 586 allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag); 587 if (err != 0) { 588 nvme_printf(ctrlr, "tag create failed %d\n", err); 589 goto out; 590 } 591 592 if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem, 593 BUS_DMA_NOWAIT, &qpair->queuemem_map)) { 594 nvme_printf(ctrlr, "failed to alloc qpair memory\n"); 595 goto out; 596 } 597 598 if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map, 599 queuemem, allocsz, nvme_single_map, &queuemem_phys, 0) != 0) { 600 nvme_printf(ctrlr, "failed to load qpair memory\n"); 601 goto out; 602 } 603 604 qpair->num_cmds = 0; 605 qpair->num_intr_handler_calls = 0; 606 qpair->cmd = (struct nvme_command *)queuemem; 607 qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz); 608 prpmem = (uint8_t *)(queuemem + cmdsz + cplsz); 609 qpair->cmd_bus_addr = queuemem_phys; 610 qpair->cpl_bus_addr = queuemem_phys + cmdsz; 611 prpmem_phys = queuemem_phys + cmdsz + cplsz; 612 613 qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[id].sq_tdbl); 614 qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[id].cq_hdbl); 615 616 TAILQ_INIT(&qpair->free_tr); 617 TAILQ_INIT(&qpair->outstanding_tr); 618 STAILQ_INIT(&qpair->queued_req); 619 620 list_phys = prpmem_phys; 621 prp_list = prpmem; 622 for (i = 0; i < qpair->num_trackers; i++) { 623 624 if (list_phys + prpsz > prpmem_phys + prpmemsz) { 625 qpair->num_trackers = i; 626 break; 627 } 628 629 /* 630 * Make sure that the PRP list for this tracker doesn't 631 * overflow to another page. 632 */ 633 if (trunc_page(list_phys) != 634 trunc_page(list_phys + prpsz - 1)) { 635 list_phys = roundup2(list_phys, PAGE_SIZE); 636 prp_list = 637 (uint8_t *)roundup2((uintptr_t)prp_list, PAGE_SIZE); 638 } 639 640 tr = malloc(sizeof(*tr), M_NVME, M_ZERO | M_WAITOK); 641 bus_dmamap_create(qpair->dma_tag_payload, 0, 642 &tr->payload_dma_map); 643 callout_init(&tr->timer, 1); 644 tr->cid = i; 645 tr->qpair = qpair; 646 tr->prp = (uint64_t *)prp_list; 647 tr->prp_bus_addr = list_phys; 648 TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq); 649 list_phys += prpsz; 650 prp_list += prpsz; 651 } 652 653 if (qpair->num_trackers == 0) { 654 nvme_printf(ctrlr, "failed to allocate enough trackers\n"); 655 goto out; 656 } 657 658 qpair->act_tr = malloc(sizeof(struct nvme_tracker *) * 659 qpair->num_entries, M_NVME, M_ZERO | M_WAITOK); 660 return (0); 661 662out: 663 nvme_qpair_destroy(qpair); 664 return (ENOMEM); 665} 666 667static void 668nvme_qpair_destroy(struct nvme_qpair *qpair) 669{ 670 struct nvme_tracker *tr; 671 672 if (qpair->tag) 673 bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag); 674 675 if (mtx_initialized(&qpair->lock)) 676 mtx_destroy(&qpair->lock); 677 678 if (qpair->res) 679 bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ, 680 rman_get_rid(qpair->res), qpair->res); 681 682 if (qpair->cmd != NULL) { 683 bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map); 684 bus_dmamem_free(qpair->dma_tag, qpair->cmd, 685 qpair->queuemem_map); 686 } 687 688 if (qpair->act_tr) 689 free(qpair->act_tr, M_NVME); 690 691 while (!TAILQ_EMPTY(&qpair->free_tr)) { 692 tr = TAILQ_FIRST(&qpair->free_tr); 693 TAILQ_REMOVE(&qpair->free_tr, tr, tailq); 694 bus_dmamap_destroy(qpair->dma_tag_payload, 695 tr->payload_dma_map); 696 free(tr, M_NVME); 697 } 698 699 if (qpair->dma_tag) 700 bus_dma_tag_destroy(qpair->dma_tag); 701 702 if (qpair->dma_tag_payload) 703 bus_dma_tag_destroy(qpair->dma_tag_payload); 704} 705 706static void 707nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair) 708{ 709 struct nvme_tracker *tr; 710 711 tr = TAILQ_FIRST(&qpair->outstanding_tr); 712 while (tr != NULL) { 713 if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) { 714 nvme_qpair_manual_complete_tracker(qpair, tr, 715 NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0, 716 FALSE); 717 tr = TAILQ_FIRST(&qpair->outstanding_tr); 718 } else { 719 tr = TAILQ_NEXT(tr, tailq); 720 } 721 } 722} 723 724void 725nvme_admin_qpair_destroy(struct nvme_qpair *qpair) 726{ 727 728 nvme_admin_qpair_abort_aers(qpair); 729 nvme_qpair_destroy(qpair); 730} 731 732void 733nvme_io_qpair_destroy(struct nvme_qpair *qpair) 734{ 735 736 nvme_qpair_destroy(qpair); 737} 738 739static void 740nvme_abort_complete(void *arg, const struct nvme_completion *status) 741{ 742 struct nvme_tracker *tr = arg; 743 744 /* 745 * If cdw0 == 1, the controller was not able to abort the command 746 * we requested. We still need to check the active tracker array, 747 * to cover race where I/O timed out at same time controller was 748 * completing the I/O. 749 */ 750 if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) { 751 /* 752 * An I/O has timed out, and the controller was unable to 753 * abort it for some reason. Construct a fake completion 754 * status, and then complete the I/O's tracker manually. 755 */ 756 nvme_printf(tr->qpair->ctrlr, 757 "abort command failed, aborting command manually\n"); 758 nvme_qpair_manual_complete_tracker(tr->qpair, tr, 759 NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0, TRUE); 760 } 761} 762 763static void 764nvme_timeout(void *arg) 765{ 766 struct nvme_tracker *tr = arg; 767 struct nvme_qpair *qpair = tr->qpair; 768 struct nvme_controller *ctrlr = qpair->ctrlr; 769 union csts_register csts; 770 771 /* 772 * Read csts to get value of cfs - controller fatal status. 773 * If no fatal status, try to call the completion routine, and 774 * if completes transactions, report a missed interrupt and 775 * return (this may need to be rate limited). Otherwise, if 776 * aborts are enabled and the controller is not reporting 777 * fatal status, abort the command. Otherwise, just reset the 778 * controller and hope for the best. 779 */ 780 csts.raw = nvme_mmio_read_4(ctrlr, csts); 781 if (csts.bits.cfs == 0 && nvme_qpair_process_completions(qpair)) { 782 nvme_printf(ctrlr, "Missing interrupt\n"); 783 return; 784 } 785 if (ctrlr->enable_aborts && csts.bits.cfs == 0) { 786 nvme_printf(ctrlr, "Aborting command due to a timeout.\n"); 787 nvme_ctrlr_cmd_abort(ctrlr, tr->cid, qpair->id, 788 nvme_abort_complete, tr); 789 } else { 790 nvme_printf(ctrlr, "Resetting controller due to a timeout%s.\n", 791 csts.bits.cfs ? " and fatal error status" : ""); 792 nvme_ctrlr_reset(ctrlr); 793 } 794} 795 796void 797nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr) 798{ 799 struct nvme_request *req; 800 struct nvme_controller *ctrlr; 801 802 mtx_assert(&qpair->lock, MA_OWNED); 803 804 req = tr->req; 805 req->cmd.cid = tr->cid; 806 qpair->act_tr[tr->cid] = tr; 807 ctrlr = qpair->ctrlr; 808 809 if (req->timeout) 810 callout_reset_curcpu(&tr->timer, ctrlr->timeout_period * hz, 811 nvme_timeout, tr); 812 813 /* Copy the command from the tracker to the submission queue. */ 814 memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd)); 815 816 if (++qpair->sq_tail == qpair->num_entries) 817 qpair->sq_tail = 0; 818 819 bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map, 820 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 821#ifndef __powerpc__ 822 /* 823 * powerpc's bus_dmamap_sync() already includes a heavyweight sync, but 824 * no other archs do. 825 */ 826 wmb(); 827#endif 828 829 nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].sq_tdbl, 830 qpair->sq_tail); 831 832 qpair->num_cmds++; 833} 834 835static void 836nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) 837{ 838 struct nvme_tracker *tr = arg; 839 uint32_t cur_nseg; 840 841 /* 842 * If the mapping operation failed, return immediately. The caller 843 * is responsible for detecting the error status and failing the 844 * tracker manually. 845 */ 846 if (error != 0) { 847 nvme_printf(tr->qpair->ctrlr, 848 "nvme_payload_map err %d\n", error); 849 return; 850 } 851 852 /* 853 * Note that we specified PAGE_SIZE for alignment and max 854 * segment size when creating the bus dma tags. So here 855 * we can safely just transfer each segment to its 856 * associated PRP entry. 857 */ 858 tr->req->cmd.prp1 = seg[0].ds_addr; 859 860 if (nseg == 2) { 861 tr->req->cmd.prp2 = seg[1].ds_addr; 862 } else if (nseg > 2) { 863 cur_nseg = 1; 864 tr->req->cmd.prp2 = (uint64_t)tr->prp_bus_addr; 865 while (cur_nseg < nseg) { 866 tr->prp[cur_nseg-1] = 867 (uint64_t)seg[cur_nseg].ds_addr; 868 cur_nseg++; 869 } 870 } else { 871 /* 872 * prp2 should not be used by the controller 873 * since there is only one segment, but set 874 * to 0 just to be safe. 875 */ 876 tr->req->cmd.prp2 = 0; 877 } 878 879 bus_dmamap_sync(tr->qpair->dma_tag_payload, tr->payload_dma_map, 880 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 881 nvme_qpair_submit_tracker(tr->qpair, tr); 882} 883 884static void 885_nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req) 886{ 887 struct nvme_tracker *tr; 888 int err = 0; 889 890 mtx_assert(&qpair->lock, MA_OWNED); 891 892 tr = TAILQ_FIRST(&qpair->free_tr); 893 req->qpair = qpair; 894 895 if (tr == NULL || !qpair->is_enabled) { 896 /* 897 * No tracker is available, or the qpair is disabled due to 898 * an in-progress controller-level reset or controller 899 * failure. 900 */ 901 902 if (qpair->ctrlr->is_failed) { 903 /* 904 * The controller has failed. Post the request to a 905 * task where it will be aborted, so that we do not 906 * invoke the request's callback in the context 907 * of the submission. 908 */ 909 nvme_ctrlr_post_failed_request(qpair->ctrlr, req); 910 } else { 911 /* 912 * Put the request on the qpair's request queue to be 913 * processed when a tracker frees up via a command 914 * completion or when the controller reset is 915 * completed. 916 */ 917 STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq); 918 } 919 return; 920 } 921 922 TAILQ_REMOVE(&qpair->free_tr, tr, tailq); 923 TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq); 924 tr->req = req; 925 926 switch (req->type) { 927 case NVME_REQUEST_VADDR: 928 KASSERT(req->payload_size <= qpair->ctrlr->max_xfer_size, 929 ("payload_size (%d) exceeds max_xfer_size (%d)\n", 930 req->payload_size, qpair->ctrlr->max_xfer_size)); 931 err = bus_dmamap_load(tr->qpair->dma_tag_payload, 932 tr->payload_dma_map, req->u.payload, req->payload_size, 933 nvme_payload_map, tr, 0); 934 if (err != 0) 935 nvme_printf(qpair->ctrlr, 936 "bus_dmamap_load returned 0x%x!\n", err); 937 break; 938 case NVME_REQUEST_NULL: 939 nvme_qpair_submit_tracker(tr->qpair, tr); 940 break; 941 case NVME_REQUEST_BIO: 942 KASSERT(req->u.bio->bio_bcount <= qpair->ctrlr->max_xfer_size, 943 ("bio->bio_bcount (%jd) exceeds max_xfer_size (%d)\n", 944 (intmax_t)req->u.bio->bio_bcount, 945 qpair->ctrlr->max_xfer_size)); 946 err = bus_dmamap_load_bio(tr->qpair->dma_tag_payload, 947 tr->payload_dma_map, req->u.bio, nvme_payload_map, tr, 0); 948 if (err != 0) 949 nvme_printf(qpair->ctrlr, 950 "bus_dmamap_load_bio returned 0x%x!\n", err); 951 break; 952 case NVME_REQUEST_CCB: 953 err = bus_dmamap_load_ccb(tr->qpair->dma_tag_payload, 954 tr->payload_dma_map, req->u.payload, 955 nvme_payload_map, tr, 0); 956 if (err != 0) 957 nvme_printf(qpair->ctrlr, 958 "bus_dmamap_load_ccb returned 0x%x!\n", err); 959 break; 960 default: 961 panic("unknown nvme request type 0x%x\n", req->type); 962 break; 963 } 964 965 if (err != 0) { 966 /* 967 * The dmamap operation failed, so we manually fail the 968 * tracker here with DATA_TRANSFER_ERROR status. 969 * 970 * nvme_qpair_manual_complete_tracker must not be called 971 * with the qpair lock held. 972 */ 973 mtx_unlock(&qpair->lock); 974 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC, 975 NVME_SC_DATA_TRANSFER_ERROR, 1 /* do not retry */, TRUE); 976 mtx_lock(&qpair->lock); 977 } 978} 979 980void 981nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req) 982{ 983 984 mtx_lock(&qpair->lock); 985 _nvme_qpair_submit_request(qpair, req); 986 mtx_unlock(&qpair->lock); 987} 988 989static void 990nvme_qpair_enable(struct nvme_qpair *qpair) 991{ 992 993 qpair->is_enabled = TRUE; 994} 995 996void 997nvme_qpair_reset(struct nvme_qpair *qpair) 998{ 999 1000 qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0; 1001 1002 /* 1003 * First time through the completion queue, HW will set phase 1004 * bit on completions to 1. So set this to 1 here, indicating 1005 * we're looking for a 1 to know which entries have completed. 1006 * we'll toggle the bit each time when the completion queue 1007 * rolls over. 1008 */ 1009 qpair->phase = 1; 1010 1011 memset(qpair->cmd, 0, 1012 qpair->num_entries * sizeof(struct nvme_command)); 1013 memset(qpair->cpl, 0, 1014 qpair->num_entries * sizeof(struct nvme_completion)); 1015} 1016 1017void 1018nvme_admin_qpair_enable(struct nvme_qpair *qpair) 1019{ 1020 struct nvme_tracker *tr; 1021 struct nvme_tracker *tr_temp; 1022 1023 /* 1024 * Manually abort each outstanding admin command. Do not retry 1025 * admin commands found here, since they will be left over from 1026 * a controller reset and its likely the context in which the 1027 * command was issued no longer applies. 1028 */ 1029 TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) { 1030 nvme_printf(qpair->ctrlr, 1031 "aborting outstanding admin command\n"); 1032 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC, 1033 NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, TRUE); 1034 } 1035 1036 nvme_qpair_enable(qpair); 1037} 1038 1039void 1040nvme_io_qpair_enable(struct nvme_qpair *qpair) 1041{ 1042 STAILQ_HEAD(, nvme_request) temp; 1043 struct nvme_tracker *tr; 1044 struct nvme_tracker *tr_temp; 1045 struct nvme_request *req; 1046 1047 /* 1048 * Manually abort each outstanding I/O. This normally results in a 1049 * retry, unless the retry count on the associated request has 1050 * reached its limit. 1051 */ 1052 TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) { 1053 nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n"); 1054 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC, 1055 NVME_SC_ABORTED_BY_REQUEST, 0, TRUE); 1056 } 1057 1058 mtx_lock(&qpair->lock); 1059 1060 nvme_qpair_enable(qpair); 1061 1062 STAILQ_INIT(&temp); 1063 STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request); 1064 1065 while (!STAILQ_EMPTY(&temp)) { 1066 req = STAILQ_FIRST(&temp); 1067 STAILQ_REMOVE_HEAD(&temp, stailq); 1068 nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n"); 1069 nvme_qpair_print_command(qpair, &req->cmd); 1070 _nvme_qpair_submit_request(qpair, req); 1071 } 1072 1073 mtx_unlock(&qpair->lock); 1074} 1075 1076static void 1077nvme_qpair_disable(struct nvme_qpair *qpair) 1078{ 1079 struct nvme_tracker *tr; 1080 1081 qpair->is_enabled = FALSE; 1082 mtx_lock(&qpair->lock); 1083 TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq) 1084 callout_stop(&tr->timer); 1085 mtx_unlock(&qpair->lock); 1086} 1087 1088void 1089nvme_admin_qpair_disable(struct nvme_qpair *qpair) 1090{ 1091 1092 nvme_qpair_disable(qpair); 1093 nvme_admin_qpair_abort_aers(qpair); 1094} 1095 1096void 1097nvme_io_qpair_disable(struct nvme_qpair *qpair) 1098{ 1099 1100 nvme_qpair_disable(qpair); 1101} 1102 1103void 1104nvme_qpair_fail(struct nvme_qpair *qpair) 1105{ 1106 struct nvme_tracker *tr; 1107 struct nvme_request *req; 1108 1109 if (!mtx_initialized(&qpair->lock)) 1110 return; 1111 1112 mtx_lock(&qpair->lock); 1113 1114 while (!STAILQ_EMPTY(&qpair->queued_req)) { 1115 req = STAILQ_FIRST(&qpair->queued_req); 1116 STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq); 1117 nvme_printf(qpair->ctrlr, "failing queued i/o\n"); 1118 mtx_unlock(&qpair->lock); 1119 nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC, 1120 NVME_SC_ABORTED_BY_REQUEST, TRUE); 1121 mtx_lock(&qpair->lock); 1122 } 1123 1124 /* Manually abort each outstanding I/O. */ 1125 while (!TAILQ_EMPTY(&qpair->outstanding_tr)) { 1126 tr = TAILQ_FIRST(&qpair->outstanding_tr); 1127 /* 1128 * Do not remove the tracker. The abort_tracker path will 1129 * do that for us. 1130 */ 1131 nvme_printf(qpair->ctrlr, "failing outstanding i/o\n"); 1132 mtx_unlock(&qpair->lock); 1133 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC, 1134 NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, TRUE); 1135 mtx_lock(&qpair->lock); 1136 } 1137 1138 mtx_unlock(&qpair->lock); 1139} 1140 1141