/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/ioat/ioat.c 302352 2016-07-05 20:51:52Z cem $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include "ioat.h"
#include "ioat_hw.h"
#include "ioat_internal.h"

#ifndef BUS_SPACE_MAXADDR_40BIT
#define BUS_SPACE_MAXADDR_40BIT	0xFFFFFFFFFFULL
#endif
#define IOAT_REFLK	(&ioat->submit_lock)
#define IOAT_SHRINK_PERIOD	(10 * hz)

static int ioat_probe(device_t device);
static int ioat_attach(device_t device);
static int ioat_detach(device_t device);
static int ioat_setup_intr(struct ioat_softc *ioat);
static int ioat_teardown_intr(struct ioat_softc *ioat);
static int ioat3_attach(device_t device);
static int ioat_start_channel(struct ioat_softc *ioat);
static int ioat_map_pci_bar(struct ioat_softc *ioat);
static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void ioat_interrupt_handler(void *arg);
static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
static int chanerr_to_errno(uint32_t);
static void ioat_process_events(struct ioat_softc *ioat);
static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
static void ioat_free_ring(struct ioat_softc *, uint32_t size,
    struct ioat_descriptor **);
static void ioat_free_ring_entry(struct ioat_softc *ioat,
    struct ioat_descriptor *desc);
static struct ioat_descriptor *ioat_alloc_ring_entry(struct ioat_softc *,
    int mflags);
static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags);
static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *ioat,
    uint32_t index);
static struct ioat_descriptor **ioat_prealloc_ring(struct ioat_softc *,
    uint32_t size, boolean_t need_dscr, int mflags);
static int ring_grow(struct ioat_softc *, uint32_t oldorder,
    struct ioat_descriptor **);
static int ring_shrink(struct ioat_softc *, uint32_t oldorder,
    struct ioat_descriptor **);
static void ioat_halted_debug(struct ioat_softc *, uint32_t);
static void ioat_poll_timer_callback(void *arg);
static void ioat_shrink_timer_callback(void *arg);
static void dump_descriptor(void *hw_desc);
static void ioat_submit_single(struct ioat_softc *ioat);
static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
    int error);
static int ioat_reset_hw(struct ioat_softc *ioat);
static void ioat_reset_hw_task(void *, int);
static void ioat_setup_sysctl(device_t device);
static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS);
static inline struct ioat_softc *ioat_get(struct ioat_softc *,
    enum ioat_ref_kind);
static inline void ioat_put(struct ioat_softc *, enum ioat_ref_kind);
static inline void _ioat_putn(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind, boolean_t);
static inline void ioat_putn(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind);
static inline void ioat_putn_locked(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind);
static void ioat_drain_locked(struct ioat_softc *);

#define ioat_log_message(v, ...) do {					\
	if ((v) <= g_ioat_debug_level) {				\
		device_printf(ioat->device, __VA_ARGS__);		\
	}								\
} while (0)

MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node");

static int g_force_legacy_interrupts;
SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN,
    &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled");

int g_ioat_debug_level = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level,
    0, "Set log level (0-3) for ioat(4). Higher is more verbose.");
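
/*
 * Usage note (a sketch, not from this file): both knobs above live under
 * the hw.ioat sysctl tree.  force_legacy_interrupts is CTLFLAG_RDTUN, so
 * it can only be set as a tunable from loader.conf(5); debug_level is
 * CTLFLAG_RWTUN and may also be changed at runtime with sysctl(8):
 *
 *	# /boot/loader.conf
 *	hw.ioat.force_legacy_interrupts=1
 *
 *	# at runtime
 *	sysctl hw.ioat.debug_level=3
 */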

/*
 * OS <-> Driver interface structures
 */
static device_method_t ioat_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     ioat_probe),
	DEVMETHOD(device_attach,    ioat_attach),
	DEVMETHOD(device_detach,    ioat_detach),
	DEVMETHOD_END
};

static driver_t ioat_pci_driver = {
	"ioat",
	ioat_pci_methods,
	sizeof(struct ioat_softc),
};

static devclass_t ioat_devclass;
DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0);
MODULE_VERSION(ioat, 1);

/*
 * Private data structures
 */
static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS];
static unsigned ioat_channel_index = 0;
SYSCTL_UINT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0,
    "Number of IOAT channels attached");

static struct _pcsid
{
	u_int32_t	type;
	const char	*desc;
} pci_ids[] = {
	{ 0x34308086, "TBG IOAT Ch0" },
	{ 0x34318086, "TBG IOAT Ch1" },
	{ 0x34328086, "TBG IOAT Ch2" },
	{ 0x34338086, "TBG IOAT Ch3" },
	{ 0x34298086, "TBG IOAT Ch4" },
	{ 0x342a8086, "TBG IOAT Ch5" },
	{ 0x342b8086, "TBG IOAT Ch6" },
	{ 0x342c8086, "TBG IOAT Ch7" },

	{ 0x37108086, "JSF IOAT Ch0" },
	{ 0x37118086, "JSF IOAT Ch1" },
	{ 0x37128086, "JSF IOAT Ch2" },
	{ 0x37138086, "JSF IOAT Ch3" },
	{ 0x37148086, "JSF IOAT Ch4" },
	{ 0x37158086, "JSF IOAT Ch5" },
	{ 0x37168086, "JSF IOAT Ch6" },
	{ 0x37178086, "JSF IOAT Ch7" },
	{ 0x37188086, "JSF IOAT Ch0 (RAID)" },
	{ 0x37198086, "JSF IOAT Ch1 (RAID)" },

	{ 0x3c208086, "SNB IOAT Ch0" },
	{ 0x3c218086, "SNB IOAT Ch1" },
	{ 0x3c228086, "SNB IOAT Ch2" },
	{ 0x3c238086, "SNB IOAT Ch3" },
	{ 0x3c248086, "SNB IOAT Ch4" },
	{ 0x3c258086, "SNB IOAT Ch5" },
	{ 0x3c268086, "SNB IOAT Ch6" },
	{ 0x3c278086, "SNB IOAT Ch7" },
	{ 0x3c2e8086, "SNB IOAT Ch0 (RAID)" },
	{ 0x3c2f8086, "SNB IOAT Ch1 (RAID)" },

	{ 0x0e208086, "IVB IOAT Ch0" },
	{ 0x0e218086, "IVB IOAT Ch1" },
	{ 0x0e228086, "IVB IOAT Ch2" },
	{ 0x0e238086, "IVB IOAT Ch3" },
	{ 0x0e248086, "IVB IOAT Ch4" },
	{ 0x0e258086, "IVB IOAT Ch5" },
	{ 0x0e268086, "IVB IOAT Ch6" },
	{ 0x0e278086, "IVB IOAT Ch7" },
	{ 0x0e2e8086, "IVB IOAT Ch0 (RAID)" },
	{ 0x0e2f8086, "IVB IOAT Ch1 (RAID)" },

	{ 0x2f208086, "HSW IOAT Ch0" },
	{ 0x2f218086, "HSW IOAT Ch1" },
	{ 0x2f228086, "HSW IOAT Ch2" },
	{ 0x2f238086, "HSW IOAT Ch3" },
	{ 0x2f248086, "HSW IOAT Ch4" },
	{ 0x2f258086, "HSW IOAT Ch5" },
	{ 0x2f268086, "HSW IOAT Ch6" },
	{ 0x2f278086, "HSW IOAT Ch7" },
	{ 0x2f2e8086, "HSW IOAT Ch0 (RAID)" },
	{ 0x2f2f8086, "HSW IOAT Ch1 (RAID)" },

	{ 0x0c508086, "BWD IOAT Ch0" },
	{ 0x0c518086, "BWD IOAT Ch1" },
	{ 0x0c528086, "BWD IOAT Ch2" },
	{ 0x0c538086, "BWD IOAT Ch3" },

	{ 0x6f508086, "BDXDE IOAT Ch0" },
	{ 0x6f518086, "BDXDE IOAT Ch1" },
	{ 0x6f528086, "BDXDE IOAT Ch2" },
	{ 0x6f538086, "BDXDE IOAT Ch3" },

	{ 0x6f208086, "BDX IOAT Ch0" },
	{ 0x6f218086, "BDX IOAT Ch1" },
	{ 0x6f228086, "BDX IOAT Ch2" },
	{ 0x6f238086, "BDX IOAT Ch3" },
	{ 0x6f248086, "BDX IOAT Ch4" },
	{ 0x6f258086, "BDX IOAT Ch5" },
	{ 0x6f268086, "BDX IOAT Ch6" },
	{ 0x6f278086, "BDX IOAT Ch7" },
	{ 0x6f2e8086, "BDX IOAT Ch0 (RAID)" },
	{ 0x6f2f8086, "BDX IOAT Ch1 (RAID)" },

	{ 0x00000000, NULL }
};

/*
 * OS <-> Driver linkage functions
 */
static int
ioat_probe(device_t device)
{
	struct _pcsid *ep;
	u_int32_t type;

	type = pci_get_devid(device);
	for (ep = pci_ids; ep->type; ep++) {
		if (ep->type == type) {
			device_set_desc(device, ep->desc);
			return (0);
		}
	}
	return (ENXIO);
}

static int
ioat_attach(device_t device)
{
	struct ioat_softc *ioat;
	int error;

	ioat = DEVICE2SOFTC(device);
	ioat->device = device;

	error = ioat_map_pci_bar(ioat);
	if (error != 0)
		goto err;

	ioat->version = ioat_read_cbver(ioat);
	if (ioat->version < IOAT_VER_3_0) {
		error = ENODEV;
		goto err;
	}

	error = ioat3_attach(device);
	if (error != 0)
		goto err;

	error = pci_enable_busmaster(device);
	if (error != 0)
		goto err;

	error = ioat_setup_intr(ioat);
	if (error != 0)
		goto err;

	error = ioat_reset_hw(ioat);
	if (error != 0)
		goto err;

	ioat_process_events(ioat);
	ioat_setup_sysctl(device);

	ioat->chan_idx = ioat_channel_index;
	ioat_channel[ioat_channel_index++] = ioat;
	ioat_test_attach();

err:
	if (error != 0)
		ioat_detach(device);
	return (error);
}

static int
ioat_detach(device_t device)
{
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);

	ioat_test_detach();
	taskqueue_drain(taskqueue_thread, &ioat->reset_task);

	mtx_lock(IOAT_REFLK);
	ioat->quiescing = TRUE;
	ioat->destroying = TRUE;
	wakeup(&ioat->quiescing);

	ioat_channel[ioat->chan_idx] = NULL;

	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	ioat_teardown_intr(ioat);
	callout_drain(&ioat->poll_timer);
	callout_drain(&ioat->shrink_timer);

	pci_disable_busmaster(device);

	if (ioat->pci_resource != NULL)
		bus_release_resource(device, SYS_RES_MEMORY,
		    ioat->pci_resource_id, ioat->pci_resource);

	if (ioat->ring != NULL)
		ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring);

	if (ioat->comp_update != NULL) {
		bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
		bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
		    ioat->comp_update_map);
		bus_dma_tag_destroy(ioat->comp_update_tag);
	}

	bus_dma_tag_destroy(ioat->hw_desc_tag);

	return (0);
}

static int
ioat_teardown_intr(struct ioat_softc *ioat)
{

	if (ioat->tag != NULL)
		bus_teardown_intr(ioat->device, ioat->res, ioat->tag);

	if (ioat->res != NULL)
		bus_release_resource(ioat->device, SYS_RES_IRQ,
		    rman_get_rid(ioat->res), ioat->res);

	pci_release_msi(ioat->device);
	return (0);
}

static int
ioat_start_channel(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	int i;

	ioat_acquire(&ioat->dmaengine);
	ioat_null(&ioat->dmaengine, NULL, NULL, 0);
	ioat_release(&ioat->dmaengine);

	for (i = 0; i < 100; i++) {
		DELAY(1);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status))
			return (0);
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_log_message(0, "could not start channel: "
	    "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr,
	    IOAT_CHANERR_STR);
	return (ENXIO);
}

/*
 * Initialize Hardware
 */
static int
ioat3_attach(device_t device)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor **ring;
	struct ioat_descriptor *next;
	struct ioat_dma_hw_descriptor *dma_hw_desc;
	int i, num_descriptors;
	int error;
	uint8_t xfercap;

	error = 0;
	ioat = DEVICE2SOFTC(device);
	ioat->capabilities = ioat_read_dmacapability(ioat);

	ioat_log_message(0, "Capabilities: %b\n", (int)ioat->capabilities,
	    IOAT_DMACAP_STR);

	xfercap = ioat_read_xfercap(ioat);
	ioat->max_xfer_size = 1 << xfercap;

	ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) &
	    IOAT_INTRDELAY_SUPPORTED) != 0;
	if (ioat->intrdelay_supported)
		ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK;

	/* TODO: need to check DCA here if we ever do XOR/PQ */

	mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
	mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF);
	callout_init(&ioat->poll_timer, 1);
	callout_init(&ioat->shrink_timer, 1);
	TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat);

	/* Establish lock order for Witness */
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat->is_resize_pending = FALSE;
	ioat->is_completion_pending = FALSE;
	ioat->is_reset_pending = FALSE;
	ioat->is_channel_running = FALSE;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
	    &ioat->comp_update_tag);

	error = bus_dmamem_alloc(ioat->comp_update_tag,
	    (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map);
	if (ioat->comp_update == NULL)
		return (ENOMEM);

	error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
	    ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
	    0);
	if (error != 0)
		return (error);

	ioat->ring_size_order = IOAT_MIN_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), 0x40, 0x0,
	    BUS_SPACE_MAXADDR_40BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct ioat_dma_hw_descriptor), 1,
	    sizeof(struct ioat_dma_hw_descriptor), 0, NULL, NULL,
	    &ioat->hw_desc_tag);

	ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,
	    M_ZERO | M_WAITOK);

	ring = ioat->ring;
	for (i = 0; i < num_descriptors; i++) {
		ring[i] = ioat_alloc_ring_entry(ioat, M_WAITOK);
		if (ring[i] == NULL)
			return (ENOMEM);

		ring[i]->id = i;
	}

	for (i = 0; i < num_descriptors - 1; i++) {
		next = ring[i + 1];
		dma_hw_desc = ring[i]->u.dma;

		dma_hw_desc->next = next->hw_desc_bus_addr;
	}

	ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr;

	ioat->head = ioat->hw_head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;
	return (0);
}

static int
ioat_map_pci_bar(struct ioat_softc *ioat)
{

	ioat->pci_resource_id = PCIR_BAR(0);
	ioat->pci_resource = bus_alloc_resource_any(ioat->device,
	    SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE);

	if (ioat->pci_resource == NULL) {
		ioat_log_message(0, "unable to allocate pci resource\n");
		return (ENODEV);
	}

	ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
	ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);
	return (0);
}

static void
ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ioat_softc *ioat = arg;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	ioat->comp_update_bus_addr = seg[0].ds_addr;
}

static void
ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	baddr = arg;
	*baddr = segs->ds_addr;
}

/*
 * Interrupt setup and handlers
 */
static int
ioat_setup_intr(struct ioat_softc *ioat)
{
	uint32_t num_vectors;
	int error;
	boolean_t use_msix;
	boolean_t force_legacy_interrupts;

	use_msix = FALSE;
	force_legacy_interrupts = FALSE;

	if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
		num_vectors = 1;
		pci_alloc_msix(ioat->device, &num_vectors);
		if (num_vectors == 1)
			use_msix = TRUE;
	}

	if (use_msix) {
		ioat->rid = 1;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_ACTIVE);
	} else {
		ioat->rid = 0;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_SHAREABLE | RF_ACTIVE);
	}
	if (ioat->res == NULL) {
		ioat_log_message(0, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ioat->tag = NULL;
	error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
	    INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
	if (error != 0) {
		ioat_log_message(0, "bus_setup_intr failed\n");
		return (error);
	}

	ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);
	return (0);
}

static boolean_t
ioat_model_resets_msix(struct ioat_softc *ioat)
{
	u_int32_t pciid;

	pciid = pci_get_devid(ioat->device);
	switch (pciid) {
		/* BWD: */
	case 0x0c508086:
	case 0x0c518086:
	case 0x0c528086:
	case 0x0c538086:
		/* BDXDE: */
	case 0x6f508086:
	case 0x6f518086:
	case 0x6f528086:
	case 0x6f538086:
		return (TRUE);
	}

	return (FALSE);
}

static void
ioat_interrupt_handler(void *arg)
{
	struct ioat_softc *ioat = arg;

	ioat->stats.interrupts++;
	ioat_process_events(ioat);
}

static int
chanerr_to_errno(uint32_t chanerr)
{

	if (chanerr == 0)
		return (0);
	if ((chanerr & (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0)
		return (EFAULT);
	if ((chanerr & (IOAT_CHANERR_RDERR | IOAT_CHANERR_WDERR)) != 0)
		return (EIO);
	/* This one is probably our fault: */
	if ((chanerr & IOAT_CHANERR_NDADDERR) != 0)
		return (EIO);
	return (EIO);
}

static void
ioat_process_events(struct ioat_softc *ioat)
{
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t comp_update, status;
	uint32_t completed, chanerr;
	boolean_t pending;
	int error;

	mtx_lock(&ioat->cleanup_lock);

	completed = 0;
	comp_update = *ioat->comp_update;
	status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	CTR0(KTR_IOAT, __func__);

	if (status == ioat->last_seen) {
		/*
		 * If we landed in process_events and nothing has been
		 * completed, check for a timeout due to channel halt.
		 */
		comp_update = ioat_get_chansts(ioat);
		goto out;
	}

	while (1) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR1(KTR_IOAT, "completing desc %d", ioat->tail);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg, 0);

		completed++;
		ioat->tail++;
		if (desc->hw_desc_bus_addr == status)
			break;
	}

	ioat->last_seen = desc->hw_desc_bus_addr;
	ioat->stats.descriptors_processed += completed;

out:
	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);

	/* Perform a racy check first; only take the locks if it passes. */
	pending = (ioat_get_active(ioat) != 0);
	if (!pending && ioat->is_completion_pending) {
		mtx_unlock(&ioat->cleanup_lock);
		mtx_lock(&ioat->submit_lock);
		mtx_lock(&ioat->cleanup_lock);

		pending = (ioat_get_active(ioat) != 0);
		if (!pending && ioat->is_completion_pending) {
			ioat->is_completion_pending = FALSE;
			callout_reset(&ioat->shrink_timer, IOAT_SHRINK_PERIOD,
			    ioat_shrink_timer_callback, ioat);
			callout_stop(&ioat->poll_timer);
		}
		mtx_unlock(&ioat->submit_lock);
	}
	mtx_unlock(&ioat->cleanup_lock);

	if (pending)
		callout_reset(&ioat->poll_timer, 1, ioat_poll_timer_callback,
		    ioat);

	if (completed != 0) {
		ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF);
		wakeup(&ioat->tail);
	}

	if (!is_ioat_halted(comp_update) && !is_ioat_suspended(comp_update))
		return;

	ioat->stats.channel_halts++;

	/*
	 * Fatal programming error on this DMA channel.  Flush any outstanding
	 * work with error status and restart the engine.
	 */
	ioat_log_message(0, "Channel halted due to fatal programming error\n");
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	ioat->quiescing = TRUE;

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_halted_debug(ioat, chanerr);
	ioat->stats.last_halt_chanerr = chanerr;

	while (ioat_get_active(ioat) > 0) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR1(KTR_IOAT, "completing err desc %d", ioat->tail);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg,
			    chanerr_to_errno(chanerr));

		ioat_putn_locked(ioat, 1, IOAT_ACTIVE_DESCR_REF);
		ioat->tail++;
		ioat->stats.descriptors_processed++;
		ioat->stats.descriptors_error++;
	}

	/* Clear error status */
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat_log_message(0, "Resetting channel to recover from error\n");
	error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task);
	KASSERT(error == 0,
	    ("%s: taskqueue_enqueue failed: %d", __func__, error));
}

static void
ioat_reset_hw_task(void *ctx, int pending __unused)
{
	struct ioat_softc *ioat;
	int error;

	ioat = ctx;
	ioat_log_message(1, "%s: Resetting channel\n", __func__);

	error = ioat_reset_hw(ioat);
	KASSERT(error == 0, ("%s: reset failed: %d", __func__, error));
	(void)error;
}

/*
 * User API functions
 */
unsigned
ioat_get_nchannels(void)
{

	return (ioat_channel_index);
}

bus_dmaengine_t
ioat_get_dmaengine(uint32_t index, int flags)
{
	struct ioat_softc *ioat;

	KASSERT((flags & ~(M_NOWAIT | M_WAITOK)) == 0,
	    ("invalid flags: 0x%08x", flags));
	KASSERT((flags & (M_NOWAIT | M_WAITOK)) != (M_NOWAIT | M_WAITOK),
	    ("invalid wait | nowait"));

	if (index >= ioat_channel_index)
		return (NULL);

	ioat = ioat_channel[index];
	if (ioat == NULL || ioat->destroying)
		return (NULL);

	if (ioat->quiescing) {
		if ((flags & M_NOWAIT) != 0)
			return (NULL);

		mtx_lock(IOAT_REFLK);
		while (ioat->quiescing && !ioat->destroying)
			msleep(&ioat->quiescing, IOAT_REFLK, 0, "getdma", 0);
		mtx_unlock(IOAT_REFLK);

		if (ioat->destroying)
			return (NULL);
	}

	/*
	 * There's a race here between the quiescing check and HW reset or
	 * module destroy.
	 */
	return (&ioat_get(ioat, IOAT_DMAENGINE_REF)->dmaengine);
}

void
ioat_put_dmaengine(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	ioat_put(ioat, IOAT_DMAENGINE_REF);
}

int
ioat_get_hwversion(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->version);
}

size_t
ioat_get_max_io_size(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->max_xfer_size);
}

int
ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	if (!ioat->intrdelay_supported)
		return (ENODEV);
	if (delay > ioat->intrdelay_max)
		return (ERANGE);

	ioat_write_2(ioat, IOAT_INTRDELAY_OFFSET, delay);
	ioat->cached_intrdelay =
	    ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_US_MASK;
	return (0);
}

uint16_t
ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->intrdelay_max);
}

void
ioat_acquire(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	CTR0(KTR_IOAT, __func__);
}

int
ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags)
{
	struct ioat_softc *ioat;
	int error;

	ioat = to_ioat_softc(dmaengine);
	ioat_acquire(dmaengine);

	error = ioat_reserve_space(ioat, n, mflags);
	if (error != 0)
		ioat_release(dmaengine);
	return (error);
}

void
ioat_release(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR0(KTR_IOAT, __func__);
	ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET, (uint16_t)ioat->hw_head);
	mtx_unlock(&ioat->submit_lock);
}
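
/*
 * Usage sketch for the API above (hypothetical consumer code, not part of
 * this driver).  A client looks up a channel, batches one or more
 * operations between ioat_acquire() and ioat_release(), and is called
 * back when the hardware completes the descriptor.  The callback, token,
 * and bus addresses below are illustrative names only.
 *
 *	static void
 *	done_cb(void *arg, int error)
 *	{
 *		// error is 0, or an errno derived from CHANERR
 *		wakeup(arg);
 *	}
 *
 *	bus_dmaengine_t eng;
 *	int token;
 *
 *	eng = ioat_get_dmaengine(0, M_WAITOK);
 *	if (eng != NULL) {
 *		ioat_acquire(eng);
 *		(void)ioat_copy(eng, dst_busaddr, src_busaddr, len,
 *		    done_cb, &token, DMA_INT_EN);
 *		ioat_release(eng);	// publish DMACOUNT; HW starts
 *		// ... wait for done_cb, then:
 *		ioat_put_dmaengine(eng);
 *	}
 */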

static struct ioat_descriptor *
ioat_op_generic(struct ioat_softc *ioat, uint8_t op,
    uint32_t size, uint64_t src, uint64_t dst,
    bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_generic_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	int mflags;

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	KASSERT((flags & ~_DMA_GENERIC_FLAGS) == 0,
	    ("Unrecognized flag(s): %#x", flags & ~_DMA_GENERIC_FLAGS));
	if ((flags & DMA_NO_WAIT) != 0)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	if (size > ioat->max_xfer_size) {
		ioat_log_message(0, "%s: max_xfer_size = %d, requested = %u\n",
		    __func__, ioat->max_xfer_size, (unsigned)size);
		return (NULL);
	}

	if (ioat_reserve_space(ioat, 1, mflags) != 0)
		return (NULL);

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = desc->u.generic;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control_generic.op = op;
	hw_desc->u.control_generic.completion_update = 1;

	if ((flags & DMA_INT_EN) != 0)
		hw_desc->u.control_generic.int_enable = 1;
	if ((flags & DMA_FENCE) != 0)
		hw_desc->u.control_generic.fence = 1;

	hw_desc->size = size;
	hw_desc->src_addr = src;
	hw_desc->dest_addr = dst;

	desc->bus_dmadesc.callback_fn = callback_fn;
	desc->bus_dmadesc.callback_arg = callback_arg;
	return (desc);
}

struct bus_dmadesc *
ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.dma;
	hw_desc->u.control.null = 1;
	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
    bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if (((src | dst) & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.dma;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1,
    bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if (((src1 | src2 | dst1 | dst2) & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}
	if (((src1 | src2 | dst1 | dst2) & PAGE_MASK) != 0) {
		ioat_log_message(0, "%s: Addresses must be page-aligned\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, src1, dst1,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.dma;
	if (src2 != src1 + PAGE_SIZE) {
		hw_desc->u.control.src_page_break = 1;
		hw_desc->next_src_addr = src2;
	}
	if (dst2 != dst1 + PAGE_SIZE) {
		hw_desc->u.control.dest_page_break = 1;
		hw_desc->next_dest_addr = dst2;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
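
/*
 * Sketch: using ioat_copy_8k_aligned() to move two physically
 * discontiguous 4K pages with a single descriptor.  The page-break
 * fields are only engaged for whichever pair is not contiguous; the
 * *_pg* bus addresses (and "eng"/"done_cb" from the earlier sketch)
 * are illustrative.
 *
 *	(void)ioat_copy_8k_aligned(eng, dst_pg0, dst_pg1,
 *	    src_pg0, src_pg1, done_cb, &token, DMA_INT_EN);
 */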

struct bus_dmadesc *
ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src,
    bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if ((ioat->capabilities & IOAT_DMACAP_MOVECRC) == 0) {
		ioat_log_message(0, "%s: Device lacks MOVECRC capability\n",
		    __func__);
		return (NULL);
	}
	if (((src | dst) & (0xffffffull << 40)) != 0) {
		ioat_log_message(0, "%s: High 24 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}
	teststore = (flags & _DMA_CRC_TESTSTORE);
	if (teststore == _DMA_CRC_TESTSTORE) {
		ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);
		return (NULL);
	}
	if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
		ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",
		    __func__);
		return (NULL);
	}

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_MOVECRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_MOVECRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_MOVECRC;
		break;
	}

	if ((flags & DMA_CRC_INLINE) == 0 &&
	    (crcptr & (0xffffffull << 40)) != 0) {
		ioat_log_message(0,
		    "%s: High 24 bits of crcptr invalid\n", __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.crc32;

	if ((flags & DMA_CRC_INLINE) == 0)
		hw_desc->crc_address = crcptr;
	else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
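
/*
 * Sketch: a copy that also CRC32s the data (illustrative addresses).
 * With DMA_CRC_STORE the computed CRC is written to crcptr, or inline
 * after the destination when DMA_CRC_INLINE is added; DMA_CRC_TEST
 * instead appears to check the data against a stored CRC.
 *
 *	uint32_t seed = 0;
 *	(void)ioat_copy_crc(eng, dst_busaddr, src_busaddr, len, &seed,
 *	    crc_busaddr, done_cb, &token, DMA_INT_EN | DMA_CRC_STORE);
 */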

struct bus_dmadesc *
ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len,
    uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if ((ioat->capabilities & IOAT_DMACAP_CRC) == 0) {
		ioat_log_message(0, "%s: Device lacks CRC capability\n",
		    __func__);
		return (NULL);
	}
	if ((src & (0xffffffull << 40)) != 0) {
		ioat_log_message(0, "%s: High 24 bits of src invalid\n",
		    __func__);
		return (NULL);
	}
	teststore = (flags & _DMA_CRC_TESTSTORE);
	if (teststore == _DMA_CRC_TESTSTORE) {
		ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);
		return (NULL);
	}
	if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
		ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",
		    __func__);
		return (NULL);
	}

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_CRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_CRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_CRC;
		break;
	}

	if ((flags & DMA_CRC_INLINE) == 0 &&
	    (crcptr & (0xffffffull << 40)) != 0) {
		ioat_log_message(0,
		    "%s: High 24 bits of crcptr invalid\n", __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.crc32;

	if ((flags & DMA_CRC_INLINE) == 0)
		hw_desc->crc_address = crcptr;
	else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern,
    bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_fill_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if ((ioat->capabilities & IOAT_DMACAP_BFILL) == 0) {
		ioat_log_message(0, "%s: Device lacks BFILL capability\n",
		    __func__);
		return (NULL);
	}

	if ((dst & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of dst invalid\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, fillpattern, dst,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.fill;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

/*
 * Ring Management
 */
static inline uint32_t
ioat_get_active(struct ioat_softc *ioat)
{

	return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1));
}

static inline uint32_t
ioat_get_ring_space(struct ioat_softc *ioat)
{

	return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1);
}

static struct ioat_descriptor *
ioat_alloc_ring_entry(struct ioat_softc *ioat, int mflags)
{
	struct ioat_generic_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	int error, busdmaflag;

	error = ENOMEM;
	hw_desc = NULL;

	if ((mflags & M_WAITOK) != 0)
		busdmaflag = BUS_DMA_WAITOK;
	else
		busdmaflag = BUS_DMA_NOWAIT;

	desc = malloc(sizeof(*desc), M_IOAT, mflags);
	if (desc == NULL)
		goto out;

	bus_dmamem_alloc(ioat->hw_desc_tag, (void **)&hw_desc,
	    BUS_DMA_ZERO | busdmaflag, &ioat->hw_desc_map);
	if (hw_desc == NULL)
		goto out;

	memset(&desc->bus_dmadesc, 0, sizeof(desc->bus_dmadesc));
	desc->u.generic = hw_desc;

	error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
	    sizeof(*hw_desc), ioat_dmamap_cb, &desc->hw_desc_bus_addr,
	    busdmaflag);
	if (error)
		goto out;

out:
	if (error) {
		ioat_free_ring_entry(ioat, desc);
		return (NULL);
	}
	return (desc);
}

static void
ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc)
{

	if (desc == NULL)
		return;

	if (desc->u.generic)
		bus_dmamem_free(ioat->hw_desc_tag, desc->u.generic,
		    ioat->hw_desc_map);
	free(desc, M_IOAT);
}
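
/*
 * Worked example of the index arithmetic above: head and tail are
 * free-running counters, masked by the power-of-two ring size.  With
 * ring_size_order = 7 (128 entries), head = 130 and tail = 125:
 * active = (130 - 125) & 127 = 5, and ring space = 128 - 5 - 1 = 122.
 * One slot is always kept unused so a full ring and an empty ring are
 * distinguishable.
 */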

/*
 * Reserves space in this IOAT descriptor ring by ensuring enough slots remain
 * for 'num_descs'.
 *
 * If mflags contains M_WAITOK, blocks until enough space is available.
 *
 * Returns zero on success, or an errno on error.  If num_descs is beyond the
 * maximum ring size, returns EINVAL; if allocation would block and mflags
 * contains M_NOWAIT, returns EAGAIN.
 *
 * Must be called with the submit_lock held; returns with the lock held.  The
 * lock may be dropped to allocate the ring.
 *
 * (The submit_lock is needed to add any entries to the ring, so callers are
 * assured enough room is available.)
 */
static int
ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags)
{
	struct ioat_descriptor **new_ring;
	uint32_t order;
	int error;

	mtx_assert(&ioat->submit_lock, MA_OWNED);
	error = 0;

	if (num_descs < 1 || num_descs > (1 << IOAT_MAX_ORDER)) {
		error = EINVAL;
		goto out;
	}
	if (ioat->quiescing) {
		error = ENXIO;
		goto out;
	}

	for (;;) {
		if (ioat_get_ring_space(ioat) >= num_descs)
			goto out;

		order = ioat->ring_size_order;
		if (ioat->is_resize_pending || order == IOAT_MAX_ORDER) {
			if ((mflags & M_WAITOK) != 0) {
				msleep(&ioat->tail, &ioat->submit_lock, 0,
				    "ioat_rsz", 0);
				continue;
			}

			error = EAGAIN;
			break;
		}

		ioat->is_resize_pending = TRUE;
		for (;;) {
			mtx_unlock(&ioat->submit_lock);

			new_ring = ioat_prealloc_ring(ioat, 1 << (order + 1),
			    TRUE, mflags);

			mtx_lock(&ioat->submit_lock);
			KASSERT(ioat->ring_size_order == order,
			    ("is_resize_pending should protect order"));

			if (new_ring == NULL) {
				KASSERT((mflags & M_WAITOK) == 0,
				    ("allocation failed"));
				error = EAGAIN;
				break;
			}

			error = ring_grow(ioat, order, new_ring);
			if (error == 0)
				break;
		}
		ioat->is_resize_pending = FALSE;
		wakeup(&ioat->tail);
		if (error)
			break;
	}

out:
	mtx_assert(&ioat->submit_lock, MA_OWNED);
	return (error);
}

static struct ioat_descriptor **
ioat_prealloc_ring(struct ioat_softc *ioat, uint32_t size, boolean_t need_dscr,
    int mflags)
{
	struct ioat_descriptor **ring;
	uint32_t i;
	int error;

	KASSERT(size > 0 && powerof2(size), ("bogus size"));

	ring = malloc(size * sizeof(*ring), M_IOAT, M_ZERO | mflags);
	if (ring == NULL)
		return (NULL);

	if (need_dscr) {
		error = ENOMEM;
		for (i = size / 2; i < size; i++) {
			ring[i] = ioat_alloc_ring_entry(ioat, mflags);
			if (ring[i] == NULL)
				goto out;
			ring[i]->id = i;
		}
	}
	error = 0;

out:
	if (error != 0 && ring != NULL) {
		ioat_free_ring(ioat, size, ring);
		ring = NULL;
	}
	return (ring);
}

static void
ioat_free_ring(struct ioat_softc *ioat, uint32_t size,
    struct ioat_descriptor **ring)
{
	uint32_t i;

	for (i = 0; i < size; i++) {
		if (ring[i] != NULL)
			ioat_free_ring_entry(ioat, ring[i]);
	}
	free(ring, M_IOAT);
}

static struct ioat_descriptor *
ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
{

	return (ioat->ring[index % (1 << ioat->ring_size_order)]);
}

static int
ring_grow(struct ioat_softc *ioat, uint32_t oldorder,
    struct ioat_descriptor **newring)
{
	struct ioat_descriptor *tmp, *next;
	struct ioat_dma_hw_descriptor *hw;
	uint32_t oldsize, newsize, head, tail, i, end;
	int error;

	CTR0(KTR_IOAT, __func__);

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (oldorder != ioat->ring_size_order || oldorder >= IOAT_MAX_ORDER) {
		error = EINVAL;
		goto out;
	}

	oldsize = (1 << oldorder);
	newsize = (1 << (oldorder + 1));

	mtx_lock(&ioat->cleanup_lock);

	head = ioat->head & (oldsize - 1);
	tail = ioat->tail & (oldsize - 1);

	/* Copy old descriptors to new ring */
	for (i = 0; i < oldsize; i++)
		newring[i] = ioat->ring[i];

	/*
	 * If head has wrapped but tail hasn't, we must swap some descriptors
	 * around so that tail can increment directly to head.
	 */
	if (head < tail) {
		for (i = 0; i <= head; i++) {
			tmp = newring[oldsize + i];

			newring[oldsize + i] = newring[i];
			newring[oldsize + i]->id = oldsize + i;

			newring[i] = tmp;
			newring[i]->id = i;
		}
		head += oldsize;
	}

	KASSERT(head >= tail, ("invariants"));

	/* Head didn't wrap; we only need to link in oldsize..newsize */
	if (head < oldsize) {
		i = oldsize - 1;
		end = newsize;
	} else {
		/* Head did wrap; link newhead..newsize and 0..oldhead */
		i = head;
		end = newsize + (head - oldsize) + 1;
	}

	/*
	 * Fix up hardware ring, being careful not to trample the active
	 * section (tail -> head).
	 */
	for (; i < end; i++) {
		KASSERT((i & (newsize - 1)) < tail ||
		    (i & (newsize - 1)) >= head, ("trampling snake"));

		next = newring[(i + 1) & (newsize - 1)];
		hw = newring[i & (newsize - 1)]->u.dma;
		hw->next = next->hw_desc_bus_addr;
	}

	free(ioat->ring, M_IOAT);
	ioat->ring = newring;
	ioat->ring_size_order = oldorder + 1;
	ioat->tail = tail;
	ioat->head = head;
	error = 0;

	mtx_unlock(&ioat->cleanup_lock);
out:
	if (error)
		ioat_free_ring(ioat, (1 << (oldorder + 1)), newring);
	return (error);
}
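
/*
 * Worked example of the wrap handling above (illustrative numbers):
 * growing from oldsize = 4 to newsize = 8 with free-running head = 5,
 * tail = 3 gives masked head = 1 < tail = 3, so entries 0..head swap
 * into the second half and head becomes oldsize + 1 = 5.  The active
 * window tail..head (3..5) is then contiguous in the new ring, and
 * only links outside it are rewritten by the loop above.
 */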

static int
ring_shrink(struct ioat_softc *ioat, uint32_t oldorder,
    struct ioat_descriptor **newring)
{
	struct ioat_dma_hw_descriptor *hw;
	struct ioat_descriptor *ent, *next;
	uint32_t oldsize, newsize, current_idx, new_idx, i;
	int error;

	CTR0(KTR_IOAT, __func__);

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (oldorder != ioat->ring_size_order || oldorder <= IOAT_MIN_ORDER) {
		error = EINVAL;
		goto out_unlocked;
	}

	oldsize = (1 << oldorder);
	newsize = (1 << (oldorder - 1));

	mtx_lock(&ioat->cleanup_lock);

	/* Can't shrink below current active set! */
	if (ioat_get_active(ioat) >= newsize) {
		error = ENOMEM;
		goto out;
	}

	/*
	 * Copy current descriptors to the new ring, dropping the removed
	 * descriptors.
	 */
	for (i = 0; i < newsize; i++) {
		current_idx = (ioat->tail + i) & (oldsize - 1);
		new_idx = (ioat->tail + i) & (newsize - 1);

		newring[new_idx] = ioat->ring[current_idx];
		newring[new_idx]->id = new_idx;
	}

	/* Free deleted descriptors */
	for (i = newsize; i < oldsize; i++) {
		ent = ioat_get_ring_entry(ioat, ioat->tail + i);
		ioat_free_ring_entry(ioat, ent);
	}

	/* Fix up hardware ring. */
	hw = newring[(ioat->tail + newsize - 1) & (newsize - 1)]->u.dma;
	next = newring[(ioat->tail + newsize) & (newsize - 1)];
	hw->next = next->hw_desc_bus_addr;

	free(ioat->ring, M_IOAT);
	ioat->ring = newring;
	ioat->ring_size_order = oldorder - 1;
	error = 0;

out:
	mtx_unlock(&ioat->cleanup_lock);
out_unlocked:
	if (error)
		ioat_free_ring(ioat, (1 << (oldorder - 1)), newring);
	return (error);
}

static void
ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr)
{
	struct ioat_descriptor *desc;

	ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr,
	    IOAT_CHANERR_STR);
	if (chanerr == 0)
		return;

	mtx_assert(&ioat->cleanup_lock, MA_OWNED);

	desc = ioat_get_ring_entry(ioat, ioat->tail + 0);
	dump_descriptor(desc->u.raw);

	desc = ioat_get_ring_entry(ioat, ioat->tail + 1);
	dump_descriptor(desc->u.raw);
}

static void
ioat_poll_timer_callback(void *arg)
{
	struct ioat_softc *ioat;

	ioat = arg;
	ioat_log_message(3, "%s\n", __func__);

	ioat_process_events(ioat);
}

static void
ioat_shrink_timer_callback(void *arg)
{
	struct ioat_descriptor **newring;
	struct ioat_softc *ioat;
	uint32_t order;

	ioat = arg;
	ioat_log_message(1, "%s\n", __func__);

	/* Slowly scale the ring down if idle. */
	mtx_lock(&ioat->submit_lock);
	order = ioat->ring_size_order;
	if (ioat->is_resize_pending || order == IOAT_MIN_ORDER) {
		mtx_unlock(&ioat->submit_lock);
		goto out;
	}
	ioat->is_resize_pending = TRUE;
	mtx_unlock(&ioat->submit_lock);

	newring = ioat_prealloc_ring(ioat, 1 << (order - 1), FALSE,
	    M_NOWAIT);

	mtx_lock(&ioat->submit_lock);
	KASSERT(ioat->ring_size_order == order,
	    ("resize_pending protects order"));

	if (newring != NULL)
		ring_shrink(ioat, order, newring);

	ioat->is_resize_pending = FALSE;
	mtx_unlock(&ioat->submit_lock);

out:
	if (ioat->ring_size_order > IOAT_MIN_ORDER)
		callout_reset(&ioat->shrink_timer, IOAT_SHRINK_PERIOD,
		    ioat_shrink_timer_callback, ioat);
}

/*
 * Support Functions
 */
static void
ioat_submit_single(struct ioat_softc *ioat)
{

	ioat_get(ioat, IOAT_ACTIVE_DESCR_REF);
	atomic_add_rel_int(&ioat->head, 1);
	atomic_add_rel_int(&ioat->hw_head, 1);

	if (!ioat->is_completion_pending) {
		ioat->is_completion_pending = TRUE;
		callout_reset(&ioat->poll_timer, 1, ioat_poll_timer_callback,
		    ioat);
		callout_stop(&ioat->shrink_timer);
	}

	ioat->stats.descriptors_submitted++;
}
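
/*
 * Note that ioat_submit_single() only advances the software head and
 * hw_head counters; the hardware does not observe new work until
 * ioat_release() writes hw_head to the DMACOUNT register.  That split
 * is what makes ioat_acquire()/ioat_release() a batching interface.
 */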

static int
ioat_reset_hw(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	unsigned timeout;
	int error;

	mtx_lock(IOAT_REFLK);
	ioat->quiescing = TRUE;
	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
	    timeout < 20; timeout++) {
		DELAY(1000);
		status = ioat_get_chansts(ioat);
	}
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce"));

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	/*
	 * IOAT v3 workaround - CHANERRMSK_INT with 3E07h to mask out errors
	 * that can cause stability issues for IOAT v3.
	 */
	pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
	    4);
	chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
	pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);

	/*
	 * BDXDE and BWD models reset MSI-X registers on device reset.
	 * Save/restore their contents manually.
	 */
	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets MSI-X registers; saving\n");
		pci_save_state(ioat->device);
	}

	ioat_reset(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
		DELAY(1000);
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets registers; restored\n");
		pci_restore_state(ioat->device);
	}

	/* Reset attempts to return the hardware to "halted." */
	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		/* So this really shouldn't happen... */
		ioat_log_message(0, "Device is active after a reset?\n");
		ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
		error = 0;
		goto out;
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	if (chanerr != 0) {
		mtx_lock(&ioat->cleanup_lock);
		ioat_halted_debug(ioat, chanerr);
		mtx_unlock(&ioat->cleanup_lock);
		error = EIO;
		goto out;
	}

	/*
	 * Bring device back online after reset.  Writing CHAINADDR brings the
	 * device back to active.
	 *
	 * The internal ring counter resets to zero, so we have to start over
	 * at zero as well.
	 */
	ioat->tail = ioat->head = ioat->hw_head = 0;
	ioat->last_seen = 0;

	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ioat->ring[0]->hw_desc_bus_addr);
	error = 0;

out:
	mtx_lock(IOAT_REFLK);
	ioat->quiescing = FALSE;
	wakeup(&ioat->quiescing);
	mtx_unlock(IOAT_REFLK);

	if (error == 0)
		error = ioat_start_channel(ioat);

	return (error);
}

static int
sysctl_handle_chansts(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	struct sbuf sb;
	uint64_t status;
	int error;

	ioat = arg1;

	status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;

	sbuf_new_for_sysctl(&sb, NULL, 256, req);
	switch (status) {
	case IOAT_CHANSTS_ACTIVE:
		sbuf_printf(&sb, "ACTIVE");
		break;
	case IOAT_CHANSTS_IDLE:
		sbuf_printf(&sb, "IDLE");
		break;
	case IOAT_CHANSTS_SUSPENDED:
		sbuf_printf(&sb, "SUSPENDED");
		break;
	case IOAT_CHANSTS_HALTED:
		sbuf_printf(&sb, "HALTED");
		break;
	case IOAT_CHANSTS_ARMED:
		sbuf_printf(&sb, "ARMED");
		break;
	default:
		sbuf_printf(&sb, "UNKNOWN");
		break;
	}
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	if (error != 0 || req->newptr == NULL)
		return (error);
	return (EINVAL);
}
PRECISION "ju", rate / factor, 1866 rate % factor); 1867#undef PRECISION 1868out: 1869 error = sbuf_finish(&sb); 1870 sbuf_delete(&sb); 1871 if (error != 0 || req->newptr == NULL) 1872 return (error); 1873 return (EINVAL); 1874} 1875 1876static int 1877sysctl_handle_error(SYSCTL_HANDLER_ARGS) 1878{ 1879 struct ioat_descriptor *desc; 1880 struct ioat_softc *ioat; 1881 int error, arg; 1882 1883 ioat = arg1; 1884 1885 arg = 0; 1886 error = SYSCTL_OUT(req, &arg, sizeof(arg)); 1887 if (error != 0 || req->newptr == NULL) 1888 return (error); 1889 1890 error = SYSCTL_IN(req, &arg, sizeof(arg)); 1891 if (error != 0) 1892 return (error); 1893 1894 if (arg != 0) { 1895 ioat_acquire(&ioat->dmaengine); 1896 desc = ioat_op_generic(ioat, IOAT_OP_COPY, 1, 1897 0xffff000000000000ull, 0xffff000000000000ull, NULL, NULL, 1898 0); 1899 if (desc == NULL) 1900 error = ENOMEM; 1901 else 1902 ioat_submit_single(ioat); 1903 ioat_release(&ioat->dmaengine); 1904 } 1905 return (error); 1906} 1907 1908static int 1909sysctl_handle_reset(SYSCTL_HANDLER_ARGS) 1910{ 1911 struct ioat_softc *ioat; 1912 int error, arg; 1913 1914 ioat = arg1; 1915 1916 arg = 0; 1917 error = SYSCTL_OUT(req, &arg, sizeof(arg)); 1918 if (error != 0 || req->newptr == NULL) 1919 return (error); 1920 1921 error = SYSCTL_IN(req, &arg, sizeof(arg)); 1922 if (error != 0) 1923 return (error); 1924 1925 if (arg != 0) 1926 error = ioat_reset_hw(ioat); 1927 1928 return (error); 1929} 1930 1931static void 1932dump_descriptor(void *hw_desc) 1933{ 1934 int i, j; 1935 1936 for (i = 0; i < 2; i++) { 1937 for (j = 0; j < 8; j++) 1938 printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]); 1939 printf("\n"); 1940 } 1941} 1942 1943static void 1944ioat_setup_sysctl(device_t device) 1945{ 1946 struct sysctl_oid_list *par, *statpar, *state, *hammer; 1947 struct sysctl_ctx_list *ctx; 1948 struct sysctl_oid *tree, *tmp; 1949 struct ioat_softc *ioat; 1950 1951 ioat = DEVICE2SOFTC(device); 1952 ctx = device_get_sysctl_ctx(device); 1953 tree = device_get_sysctl_tree(device); 1954 par = SYSCTL_CHILDREN(tree); 1955 1956 SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD, 1957 &ioat->version, 0, "HW version (0xMM form)"); 1958 SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD, 1959 &ioat->max_xfer_size, 0, "HW maximum transfer size"); 1960 SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD, 1961 &ioat->intrdelay_supported, 0, "Is INTRDELAY supported"); 1962 SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD, 1963 &ioat->intrdelay_max, 0, 1964 "Maximum configurable INTRDELAY on this channel (microseconds)"); 1965 1966 tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL, 1967 "IOAT channel internal state"); 1968 state = SYSCTL_CHILDREN(tmp); 1969 1970 SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD, 1971 &ioat->ring_size_order, 0, "SW descriptor ring size order"); 1972 SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head, 1973 0, "SW descriptor head pointer index"); 1974 SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail, 1975 0, "SW descriptor tail pointer index"); 1976 SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "hw_head", CTLFLAG_RD, 1977 &ioat->hw_head, 0, "HW DMACOUNT"); 1978 1979 SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD, 1980 ioat->comp_update, "HW addr of last completion"); 1981 1982 SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_resize_pending", CTLFLAG_RD, 1983 &ioat->is_resize_pending, 0, "resize pending"); 1984 SYSCTL_ADD_INT(ctx, 

static void
ioat_setup_sysctl(device_t device)
{
	struct sysctl_oid_list *par, *statpar, *state, *hammer;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree, *tmp;
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);
	ctx = device_get_sysctl_ctx(device);
	tree = device_get_sysctl_tree(device);
	par = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD,
	    &ioat->version, 0, "HW version (0xMM form)");
	SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD,
	    &ioat->max_xfer_size, 0, "HW maximum transfer size");
	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD,
	    &ioat->intrdelay_supported, 0, "Is INTRDELAY supported");
	SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD,
	    &ioat->intrdelay_max, 0,
	    "Maximum configurable INTRDELAY on this channel (microseconds)");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL,
	    "IOAT channel internal state");
	state = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD,
	    &ioat->ring_size_order, 0, "SW descriptor ring size order");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head,
	    0, "SW descriptor head pointer index");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail,
	    0, "SW descriptor tail pointer index");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "hw_head", CTLFLAG_RD,
	    &ioat->hw_head, 0, "HW DMACOUNT");

	SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD,
	    ioat->comp_update, "HW addr of last completion");

	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_resize_pending", CTLFLAG_RD,
	    &ioat->is_resize_pending, 0, "resize pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_completion_pending",
	    CTLFLAG_RD, &ioat->is_completion_pending, 0, "completion pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_reset_pending", CTLFLAG_RD,
	    &ioat->is_reset_pending, 0, "reset pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_channel_running", CTLFLAG_RD,
	    &ioat->is_channel_running, 0, "channel running");

	SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A",
	    "String of the channel status");

	SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD,
	    &ioat->cached_intrdelay, 0,
	    "Current INTRDELAY on this channel (cached, microseconds)");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer", CTLFLAG_RD, NULL,
	    "Big hammers (mostly for testing)");
	hammer = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset",
	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I",
	    "Set to non-zero to reset the hardware");
	SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_error",
	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_error, "I",
	    "Set to non-zero to inject a recoverable hardware error");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL,
	    "IOAT channel statistics");
	statpar = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts", CTLFLAG_RW,
	    &ioat->stats.interrupts,
	    "Number of interrupts processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors", CTLFLAG_RW,
	    &ioat->stats.descriptors_processed,
	    "Number of descriptors processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted", CTLFLAG_RW,
	    &ioat->stats.descriptors_submitted,
	    "Number of descriptors submitted to this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored", CTLFLAG_RW,
	    &ioat->stats.descriptors_error,
	    "Number of descriptors failed by channel errors");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts", CTLFLAG_RW,
	    &ioat->stats.channel_halts, 0,
	    "Number of times the channel has halted");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr", CTLFLAG_RW,
	    &ioat->stats.last_halt_chanerr, 0,
	    "The raw CHANERR when the channel was last halted");

	SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A",
	    "Descriptors per interrupt");
}
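
/*
 * Usage note (sketch): the OIDs above hang off each device's sysctl
 * tree, so for unit 0 they should appear as, e.g.:
 *
 *	sysctl dev.ioat.0.state.chansts
 *	sysctl dev.ioat.0.stats.desc_per_interrupt
 *	sysctl dev.ioat.0.hammer.force_hw_reset=1
 */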

static inline struct ioat_softc *
ioat_get(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{
	uint32_t old;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	old = atomic_fetchadd_32(&ioat->refcnt, 1);
	KASSERT(old < UINT32_MAX, ("refcnt overflow"));

#ifdef INVARIANTS
	old = atomic_fetchadd_32(&ioat->refkinds[kind], 1);
	KASSERT(old < UINT32_MAX, ("refcnt kind overflow"));
#endif

	return (ioat);
}

static inline void
ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
{

	_ioat_putn(ioat, n, kind, FALSE);
}

static inline void
ioat_putn_locked(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
{

	_ioat_putn(ioat, n, kind, TRUE);
}

static inline void
_ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind,
    boolean_t locked)
{
	uint32_t old;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	if (n == 0)
		return;

#ifdef INVARIANTS
	old = atomic_fetchadd_32(&ioat->refkinds[kind], -n);
	KASSERT(old >= n, ("refcnt kind underflow"));
#endif

	/* Skip acquiring the lock if resulting refcnt > 0. */
	for (;;) {
		old = ioat->refcnt;
		if (old <= n)
			break;
		if (atomic_cmpset_32(&ioat->refcnt, old, old - n))
			return;
	}

	if (locked)
		mtx_assert(IOAT_REFLK, MA_OWNED);
	else
		mtx_lock(IOAT_REFLK);

	old = atomic_fetchadd_32(&ioat->refcnt, -n);
	KASSERT(old >= n, ("refcnt error"));

	if (old == n)
		wakeup(IOAT_REFLK);
	if (!locked)
		mtx_unlock(IOAT_REFLK);
}

static inline void
ioat_put(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{

	ioat_putn(ioat, 1, kind);
}

static void
ioat_drain_locked(struct ioat_softc *ioat)
{

	mtx_assert(IOAT_REFLK, MA_OWNED);
	while (ioat->refcnt > 0)
		msleep(IOAT_REFLK, IOAT_REFLK, 0, "ioat_drain", 0);
}
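
/*
 * Usage note (sketch): the DDB command defined below dumps this state
 * from the in-kernel debugger, e.g. for channel 0:
 *
 *	db> show ioat 0
 */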

#ifdef DDB
#define	_db_show_lock(lo)	LOCK_CLASS(lo)->lc_ddb_show(lo)
#define	db_show_lock(lk)	_db_show_lock(&(lk)->lock_object)
DB_SHOW_COMMAND(ioat, db_show_ioat)
{
	struct ioat_softc *sc;
	unsigned idx;

	if (!have_addr)
		goto usage;
	idx = (unsigned)addr;
	if (addr >= ioat_channel_index)
		goto usage;

	sc = ioat_channel[idx];
	db_printf("ioat softc at %p\n", sc);
	if (sc == NULL)
		return;

	db_printf(" version: %d\n", sc->version);
	db_printf(" chan_idx: %u\n", sc->chan_idx);
	db_printf(" submit_lock: ");
	db_show_lock(&sc->submit_lock);

	db_printf(" capabilities: %b\n", (int)sc->capabilities,
	    IOAT_DMACAP_STR);
	db_printf(" cached_intrdelay: %u\n", sc->cached_intrdelay);
	db_printf(" *comp_update: 0x%jx\n", (uintmax_t)*sc->comp_update);

	db_printf(" poll_timer:\n");
	db_printf("  c_time: %ju\n", (uintmax_t)sc->poll_timer.c_time);
	db_printf("  c_arg: %p\n", sc->poll_timer.c_arg);
	db_printf("  c_func: %p\n", sc->poll_timer.c_func);
	db_printf("  c_lock: %p\n", sc->poll_timer.c_lock);
	db_printf("  c_flags: 0x%x\n", (unsigned)sc->poll_timer.c_flags);

	db_printf(" shrink_timer:\n");
	db_printf("  c_time: %ju\n", (uintmax_t)sc->shrink_timer.c_time);
	db_printf("  c_arg: %p\n", sc->shrink_timer.c_arg);
	db_printf("  c_func: %p\n", sc->shrink_timer.c_func);
	db_printf("  c_lock: %p\n", sc->shrink_timer.c_lock);
	db_printf("  c_flags: 0x%x\n", (unsigned)sc->shrink_timer.c_flags);

	db_printf(" quiescing: %d\n", (int)sc->quiescing);
	db_printf(" destroying: %d\n", (int)sc->destroying);
	db_printf(" is_resize_pending: %d\n", (int)sc->is_resize_pending);
	db_printf(" is_completion_pending: %d\n",
	    (int)sc->is_completion_pending);
	db_printf(" is_reset_pending: %d\n", (int)sc->is_reset_pending);
	db_printf(" is_channel_running: %d\n", (int)sc->is_channel_running);
	db_printf(" intrdelay_supported: %d\n", (int)sc->intrdelay_supported);

	db_printf(" head: %u\n", sc->head);
	db_printf(" tail: %u\n", sc->tail);
	db_printf(" hw_head: %u\n", sc->hw_head);
	db_printf(" ring_size_order: %u\n", sc->ring_size_order);
	db_printf(" last_seen: 0x%lx\n", sc->last_seen);
	db_printf(" ring: %p\n", sc->ring);

	db_printf(" cleanup_lock: ");
	db_show_lock(&sc->cleanup_lock);

	db_printf(" refcnt: %u\n", sc->refcnt);
#ifdef INVARIANTS
	CTASSERT(IOAT_NUM_REF_KINDS == 2);
	db_printf(" refkinds: [ENG=%u, DESCR=%u]\n", sc->refkinds[0],
	    sc->refkinds[1]);
#endif
	db_printf(" stats:\n");
	db_printf("  interrupts: %lu\n", sc->stats.interrupts);
	db_printf("  descriptors_processed: %lu\n",
	    sc->stats.descriptors_processed);
	db_printf("  descriptors_error: %lu\n", sc->stats.descriptors_error);
	db_printf("  descriptors_submitted: %lu\n",
	    sc->stats.descriptors_submitted);

	db_printf("  channel_halts: %u\n", sc->stats.channel_halts);
	db_printf("  last_halt_chanerr: %u\n", sc->stats.last_halt_chanerr);

	if (db_pager_quit)
		return;

	db_printf(" hw status:\n");
	db_printf("  status: 0x%lx\n", ioat_get_chansts(sc));
	db_printf("  chanctrl: 0x%x\n",
	    (unsigned)ioat_read_2(sc, IOAT_CHANCTRL_OFFSET));
	db_printf("  chancmd: 0x%x\n",
	    (unsigned)ioat_read_1(sc, IOAT_CHANCMD_OFFSET));
	db_printf("  dmacount: 0x%x\n",
	    (unsigned)ioat_read_2(sc, IOAT_DMACOUNT_OFFSET));
	db_printf("  chainaddr: 0x%lx\n",
	    ioat_read_double_4(sc, IOAT_CHAINADDR_OFFSET_LOW));
	db_printf("  chancmp: 0x%lx\n",
	    ioat_read_double_4(sc, IOAT_CHANCMP_OFFSET_LOW));
	db_printf("  chanerr: %b\n",
	    (int)ioat_read_4(sc, IOAT_CHANERR_OFFSET), IOAT_CHANERR_STR);
	return;
usage:
	db_printf("usage: show ioat <0-%u>\n", ioat_channel_index);
	return;
}
#endif /* DDB */