/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/ioat/ioat.c 302354 2016-07-05 20:53:32Z cem $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include "ioat.h"
#include "ioat_hw.h"
#include "ioat_internal.h"

#ifndef	BUS_SPACE_MAXADDR_40BIT
#define	BUS_SPACE_MAXADDR_40BIT	0xFFFFFFFFFFULL
#endif
#define	IOAT_REFLK	(&ioat->submit_lock)
#define	IOAT_SHRINK_PERIOD	(10 * hz)

static int ioat_probe(device_t device);
static int ioat_attach(device_t device);
static int ioat_detach(device_t device);
static int ioat_setup_intr(struct ioat_softc *ioat);
static int ioat_teardown_intr(struct ioat_softc *ioat);
static int ioat3_attach(device_t device);
static int ioat_start_channel(struct ioat_softc *ioat);
static int ioat_map_pci_bar(struct ioat_softc *ioat);
static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void ioat_interrupt_handler(void *arg);
static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
static int chanerr_to_errno(uint32_t);
static void ioat_process_events(struct ioat_softc *ioat);
static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
static void ioat_free_ring(struct ioat_softc *, uint32_t size,
    struct ioat_descriptor **);
static void ioat_free_ring_entry(struct ioat_softc *ioat,
    struct ioat_descriptor *desc);
static struct ioat_descriptor *ioat_alloc_ring_entry(struct ioat_softc *,
    int mflags);
static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags);
static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *ioat,
    uint32_t index);
static struct ioat_descriptor **ioat_prealloc_ring(struct ioat_softc *,
    uint32_t size, boolean_t need_dscr, int mflags);
static int ring_grow(struct ioat_softc *, uint32_t oldorder,
    struct ioat_descriptor **);
static int ring_shrink(struct ioat_softc *, uint32_t oldorder,
    struct ioat_descriptor **);
static void ioat_halted_debug(struct ioat_softc *, uint32_t);
static void ioat_poll_timer_callback(void *arg);
static void ioat_shrink_timer_callback(void *arg);
static void dump_descriptor(void *hw_desc);
static void ioat_submit_single(struct ioat_softc *ioat);
static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
    int error);
static int ioat_reset_hw(struct ioat_softc *ioat);
static void ioat_reset_hw_task(void *, int);
static void ioat_setup_sysctl(device_t device);
static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS);
static inline struct ioat_softc *ioat_get(struct ioat_softc *,
    enum ioat_ref_kind);
static inline void ioat_put(struct ioat_softc *, enum ioat_ref_kind);
static inline void _ioat_putn(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind, boolean_t);
static inline void ioat_putn(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind);
static inline void ioat_putn_locked(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind);
static void ioat_drain_locked(struct ioat_softc *);

#define	ioat_log_message(v, ...) do {					\
	if ((v) <= g_ioat_debug_level) {				\
		device_printf(ioat->device, __VA_ARGS__);		\
	}								\
} while (0)

MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node");

static int g_force_legacy_interrupts;
SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN,
    &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled");

int g_ioat_debug_level = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level,
    0, "Set log level (0-3) for ioat(4). Higher is more verbose.");
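
/*
 * Usage note (illustrative, not additional code): both knobs above live
 * under the hw.ioat sysctl node created here.  force_legacy_interrupts is
 * CTLFLAG_RDTUN, so it is set as a loader tunable, e.g. in
 * /boot/loader.conf:
 *
 *	hw.ioat.force_legacy_interrupts="1"
 *
 * debug_level is CTLFLAG_RWTUN and may also be changed at runtime:
 *
 *	sysctl hw.ioat.debug_level=3
 */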

/*
 * OS <-> Driver interface structures
 */
static device_method_t ioat_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ioat_probe),
	DEVMETHOD(device_attach, ioat_attach),
	DEVMETHOD(device_detach, ioat_detach),
	DEVMETHOD_END
};

static driver_t ioat_pci_driver = {
	"ioat",
	ioat_pci_methods,
	sizeof(struct ioat_softc),
};

static devclass_t ioat_devclass;
DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0);
MODULE_VERSION(ioat, 1);

/*
 * Private data structures
 */
static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS];
static unsigned ioat_channel_index = 0;
SYSCTL_UINT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0,
    "Number of IOAT channels attached");

static struct _pcsid
{
	u_int32_t type;
	const char *desc;
} pci_ids[] = {
	{ 0x34308086, "TBG IOAT Ch0" },
	{ 0x34318086, "TBG IOAT Ch1" },
	{ 0x34328086, "TBG IOAT Ch2" },
	{ 0x34338086, "TBG IOAT Ch3" },
	{ 0x34298086, "TBG IOAT Ch4" },
	{ 0x342a8086, "TBG IOAT Ch5" },
	{ 0x342b8086, "TBG IOAT Ch6" },
	{ 0x342c8086, "TBG IOAT Ch7" },

	{ 0x37108086, "JSF IOAT Ch0" },
	{ 0x37118086, "JSF IOAT Ch1" },
	{ 0x37128086, "JSF IOAT Ch2" },
	{ 0x37138086, "JSF IOAT Ch3" },
	{ 0x37148086, "JSF IOAT Ch4" },
	{ 0x37158086, "JSF IOAT Ch5" },
	{ 0x37168086, "JSF IOAT Ch6" },
	{ 0x37178086, "JSF IOAT Ch7" },
	{ 0x37188086, "JSF IOAT Ch0 (RAID)" },
	{ 0x37198086, "JSF IOAT Ch1 (RAID)" },

	{ 0x3c208086, "SNB IOAT Ch0" },
	{ 0x3c218086, "SNB IOAT Ch1" },
	{ 0x3c228086, "SNB IOAT Ch2" },
	{ 0x3c238086, "SNB IOAT Ch3" },
	{ 0x3c248086, "SNB IOAT Ch4" },
	{ 0x3c258086, "SNB IOAT Ch5" },
	{ 0x3c268086, "SNB IOAT Ch6" },
	{ 0x3c278086, "SNB IOAT Ch7" },
	{ 0x3c2e8086, "SNB IOAT Ch0 (RAID)" },
	{ 0x3c2f8086, "SNB IOAT Ch1 (RAID)" },

	{ 0x0e208086, "IVB IOAT Ch0" },
	{ 0x0e218086, "IVB IOAT Ch1" },
	{ 0x0e228086, "IVB IOAT Ch2" },
	{ 0x0e238086, "IVB IOAT Ch3" },
	{ 0x0e248086, "IVB IOAT Ch4" },
	{ 0x0e258086, "IVB IOAT Ch5" },
	{ 0x0e268086, "IVB IOAT Ch6" },
	{ 0x0e278086, "IVB IOAT Ch7" },
	{ 0x0e2e8086, "IVB IOAT Ch0 (RAID)" },
	{ 0x0e2f8086, "IVB IOAT Ch1 (RAID)" },

	{ 0x2f208086, "HSW IOAT Ch0" },
	{ 0x2f218086, "HSW IOAT Ch1" },
	{ 0x2f228086, "HSW IOAT Ch2" },
	{ 0x2f238086, "HSW IOAT Ch3" },
	{ 0x2f248086, "HSW IOAT Ch4" },
	{ 0x2f258086, "HSW IOAT Ch5" },
	{ 0x2f268086, "HSW IOAT Ch6" },
	{ 0x2f278086, "HSW IOAT Ch7" },
	{ 0x2f2e8086, "HSW IOAT Ch0 (RAID)" },
	{ 0x2f2f8086, "HSW IOAT Ch1 (RAID)" },

	{ 0x0c508086, "BWD IOAT Ch0" },
	{ 0x0c518086, "BWD IOAT Ch1" },
	{ 0x0c528086, "BWD IOAT Ch2" },
	{ 0x0c538086, "BWD IOAT Ch3" },

	{ 0x6f508086, "BDXDE IOAT Ch0" },
	{ 0x6f518086, "BDXDE IOAT Ch1" },
	{ 0x6f528086, "BDXDE IOAT Ch2" },
	{ 0x6f538086, "BDXDE IOAT Ch3" },

	{ 0x6f208086, "BDX IOAT Ch0" },
	{ 0x6f218086, "BDX IOAT Ch1" },
	{ 0x6f228086, "BDX IOAT Ch2" },
	{ 0x6f238086, "BDX IOAT Ch3" },
	{ 0x6f248086, "BDX IOAT Ch4" },
	{ 0x6f258086, "BDX IOAT Ch5" },
	{ 0x6f268086, "BDX IOAT Ch6" },
	{ 0x6f278086, "BDX IOAT Ch7" },
	{ 0x6f2e8086, "BDX IOAT Ch0 (RAID)" },
	{ 0x6f2f8086, "BDX IOAT Ch1 (RAID)" },

	{ 0x00000000, NULL }
};

/*
 * OS <-> Driver linkage functions
 */
static int
ioat_probe(device_t device)
{
	struct _pcsid *ep;
	u_int32_t type;

	type = pci_get_devid(device);
	for (ep = pci_ids; ep->type; ep++) {
		if (ep->type == type) {
			device_set_desc(device, ep->desc);
			return (0);
		}
	}
	return (ENXIO);
}

static int
ioat_attach(device_t device)
{
	struct ioat_softc *ioat;
	int error;

	ioat = DEVICE2SOFTC(device);
	ioat->device = device;

	error = ioat_map_pci_bar(ioat);
	if (error != 0)
		goto err;

	ioat->version = ioat_read_cbver(ioat);
	if (ioat->version < IOAT_VER_3_0) {
		error = ENODEV;
		goto err;
	}

	error = ioat3_attach(device);
	if (error != 0)
		goto err;

	error = pci_enable_busmaster(device);
	if (error != 0)
		goto err;

	error = ioat_setup_intr(ioat);
	if (error != 0)
		goto err;

	error = ioat_reset_hw(ioat);
	if (error != 0)
		goto err;

	ioat_process_events(ioat);
	ioat_setup_sysctl(device);

	ioat->chan_idx = ioat_channel_index;
	ioat_channel[ioat_channel_index++] = ioat;
	ioat_test_attach();

err:
	if (error != 0)
		ioat_detach(device);
	return (error);
}

static int
ioat_detach(device_t device)
{
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);

	ioat_test_detach();
	taskqueue_drain(taskqueue_thread, &ioat->reset_task);

	mtx_lock(IOAT_REFLK);
	ioat->quiescing = TRUE;
	ioat->destroying = TRUE;
	wakeup(&ioat->quiescing);
	wakeup(&ioat->resetting);

	ioat_channel[ioat->chan_idx] = NULL;

	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	ioat_teardown_intr(ioat);
	callout_drain(&ioat->poll_timer);
	callout_drain(&ioat->shrink_timer);

	pci_disable_busmaster(device);

	if (ioat->pci_resource != NULL)
		bus_release_resource(device, SYS_RES_MEMORY,
		    ioat->pci_resource_id, ioat->pci_resource);

	if (ioat->ring != NULL)
		ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring);

	if (ioat->comp_update != NULL) {
		bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
		bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
		    ioat->comp_update_map);
		bus_dma_tag_destroy(ioat->comp_update_tag);
	}

	bus_dma_tag_destroy(ioat->hw_desc_tag);

	return (0);
}

static int
ioat_teardown_intr(struct ioat_softc *ioat)
{

	if (ioat->tag != NULL)
		bus_teardown_intr(ioat->device, ioat->res, ioat->tag);

	if (ioat->res != NULL)
		bus_release_resource(ioat->device, SYS_RES_IRQ,
		    rman_get_rid(ioat->res), ioat->res);

	pci_release_msi(ioat->device);
	return (0);
}

static int
ioat_start_channel(struct ioat_softc *ioat)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t status;
	uint32_t chanerr;
	int i;

	ioat_acquire(&ioat->dmaengine);

	/* Submit 'NULL' operation manually to avoid quiescing flag */
	desc = ioat_get_ring_entry(ioat, ioat->head);
	dmadesc = &desc->bus_dmadesc;
	hw_desc = desc->u.dma;

	dmadesc->callback_fn = NULL;
	dmadesc->callback_arg = NULL;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control_generic.op = IOAT_OP_COPY;
	hw_desc->u.control_generic.completion_update = 1;
	hw_desc->size = 8;
	hw_desc->src_addr = 0;
	hw_desc->dest_addr = 0;
	hw_desc->u.control.null = 1;

	ioat_submit_single(ioat);
	ioat_release(&ioat->dmaengine);

	for (i = 0; i < 100; i++) {
		DELAY(1);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status))
			return (0);
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_log_message(0, "could not start channel: "
	    "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr,
	    IOAT_CHANERR_STR);
	return (ENXIO);
}

/*
 * Initialize Hardware
 */
static int
ioat3_attach(device_t device)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor **ring;
	struct ioat_descriptor *next;
	struct ioat_dma_hw_descriptor *dma_hw_desc;
	int i, num_descriptors;
	int error;
	uint8_t xfercap;

	error = 0;
	ioat = DEVICE2SOFTC(device);
	ioat->capabilities = ioat_read_dmacapability(ioat);

	ioat_log_message(0, "Capabilities: %b\n", (int)ioat->capabilities,
	    IOAT_DMACAP_STR);

	xfercap = ioat_read_xfercap(ioat);
	ioat->max_xfer_size = 1 << xfercap;

	ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) &
	    IOAT_INTRDELAY_SUPPORTED) != 0;
	if (ioat->intrdelay_supported)
		ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK;

	/* TODO: need to check DCA here if we ever do XOR/PQ */

	mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
	mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF);
	callout_init(&ioat->poll_timer, 1);
	callout_init(&ioat->shrink_timer, 1);
	TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat);

	/* Establish lock order for Witness */
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat->is_resize_pending = FALSE;
	ioat->is_completion_pending = FALSE;
	ioat->is_reset_pending = FALSE;
	ioat->is_channel_running = FALSE;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
	    &ioat->comp_update_tag);

	error = bus_dmamem_alloc(ioat->comp_update_tag,
	    (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map);
	if (ioat->comp_update == NULL)
		return (ENOMEM);

	error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
	    ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
	    0);
	if (error != 0)
		return (error);

	ioat->ring_size_order = IOAT_MIN_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), 0x40, 0x0,
	    BUS_SPACE_MAXADDR_40BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct ioat_dma_hw_descriptor), 1,
	    sizeof(struct ioat_dma_hw_descriptor), 0, NULL, NULL,
	    &ioat->hw_desc_tag);

	ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,
	    M_ZERO | M_WAITOK);

	ring = ioat->ring;
	for (i = 0; i < num_descriptors; i++) {
		ring[i] = ioat_alloc_ring_entry(ioat, M_WAITOK);
		if (ring[i] == NULL)
			return (ENOMEM);

		ring[i]->id = i;
	}

	for (i = 0; i < num_descriptors - 1; i++) {
		next = ring[i + 1];
		dma_hw_desc = ring[i]->u.dma;

		dma_hw_desc->next = next->hw_desc_bus_addr;
	}

	ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr;

	ioat->head = ioat->hw_head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;
	*ioat->comp_update = 0;
	return (0);
}
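
/*
 * Illustrative picture of the result above (not additional code): with
 * ring order n, the hardware descriptors now form one closed chain,
 *
 *	ring[0]->u.dma->next == ring[1]->hw_desc_bus_addr
 *	ring[1]->u.dma->next == ring[2]->hw_desc_bus_addr
 *	...
 *	ring[2^n - 1]->u.dma->next == ring[0]->hw_desc_bus_addr
 *
 * so the engine can run around the ring without further relinking until
 * ring_grow()/ring_shrink() rewrite the inactive links.
 */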

static int
ioat_map_pci_bar(struct ioat_softc *ioat)
{

	ioat->pci_resource_id = PCIR_BAR(0);
	ioat->pci_resource = bus_alloc_resource_any(ioat->device,
	    SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE);

	if (ioat->pci_resource == NULL) {
		ioat_log_message(0, "unable to allocate pci resource\n");
		return (ENODEV);
	}

	ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
	ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);
	return (0);
}

static void
ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ioat_softc *ioat = arg;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	ioat->comp_update_bus_addr = seg[0].ds_addr;
}

static void
ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	baddr = arg;
	*baddr = segs->ds_addr;
}

/*
 * Interrupt setup and handlers
 */
static int
ioat_setup_intr(struct ioat_softc *ioat)
{
	uint32_t num_vectors;
	int error;
	boolean_t use_msix;
	boolean_t force_legacy_interrupts;

	use_msix = FALSE;
	force_legacy_interrupts = FALSE;

	if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
		num_vectors = 1;
		pci_alloc_msix(ioat->device, &num_vectors);
		if (num_vectors == 1)
			use_msix = TRUE;
	}

	if (use_msix) {
		ioat->rid = 1;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_ACTIVE);
	} else {
		ioat->rid = 0;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_SHAREABLE | RF_ACTIVE);
	}
	if (ioat->res == NULL) {
		ioat_log_message(0, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ioat->tag = NULL;
	error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
	    INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
	if (error != 0) {
		ioat_log_message(0, "bus_setup_intr failed\n");
		return (error);
	}

	ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);
	return (0);
}

static boolean_t
ioat_model_resets_msix(struct ioat_softc *ioat)
{
	u_int32_t pciid;

	pciid = pci_get_devid(ioat->device);
	switch (pciid) {
	/* BWD: */
	case 0x0c508086:
	case 0x0c518086:
	case 0x0c528086:
	case 0x0c538086:
	/* BDXDE: */
	case 0x6f508086:
	case 0x6f518086:
	case 0x6f528086:
	case 0x6f538086:
		return (TRUE);
	}

	return (FALSE);
}

static void
ioat_interrupt_handler(void *arg)
{
	struct ioat_softc *ioat = arg;

	ioat->stats.interrupts++;
	ioat_process_events(ioat);
}

static int
chanerr_to_errno(uint32_t chanerr)
{

	if (chanerr == 0)
		return (0);
	if ((chanerr & (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0)
		return (EFAULT);
	if ((chanerr & (IOAT_CHANERR_RDERR | IOAT_CHANERR_WDERR)) != 0)
		return (EIO);
	/* This one is probably our fault: */
	if ((chanerr & IOAT_CHANERR_NDADDERR) != 0)
		return (EIO);
	return (EIO);
}

static void
ioat_process_events(struct ioat_softc *ioat)
{
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t comp_update, status;
	uint32_t completed, chanerr;
	boolean_t pending;
	int error;

	CTR0(KTR_IOAT, __func__);

	mtx_lock(&ioat->cleanup_lock);

	/*
	 * Don't run while the hardware is being reset.  Reset is responsible
	 * for blocking new work and draining & completing existing work, so
	 * there is nothing to do until new work is queued after reset anyway.
	 */
	if (ioat->resetting_cleanup) {
		mtx_unlock(&ioat->cleanup_lock);
		return;
	}

	completed = 0;
	comp_update = *ioat->comp_update;
	status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	if (status == ioat->last_seen) {
		/*
		 * If we landed in process_events and nothing has been
		 * completed, check for a timeout due to channel halt.
		 */
		comp_update = ioat_get_chansts(ioat);
		goto out;
	}

	while (1) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR1(KTR_IOAT, "completing desc %d", ioat->tail);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg, 0);

		completed++;
		ioat->tail++;
		if (desc->hw_desc_bus_addr == status)
			break;
	}

	ioat->last_seen = desc->hw_desc_bus_addr;
	ioat->stats.descriptors_processed += completed;

out:
	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);

	/* Perform a racy check first; only take the locks if it passes. */
	pending = (ioat_get_active(ioat) != 0);
	if (!pending && ioat->is_completion_pending) {
		mtx_unlock(&ioat->cleanup_lock);
		mtx_lock(&ioat->submit_lock);
		mtx_lock(&ioat->cleanup_lock);

		pending = (ioat_get_active(ioat) != 0);
		if (!pending && ioat->is_completion_pending) {
			ioat->is_completion_pending = FALSE;
			callout_reset(&ioat->shrink_timer, IOAT_SHRINK_PERIOD,
			    ioat_shrink_timer_callback, ioat);
			callout_stop(&ioat->poll_timer);
		}
		mtx_unlock(&ioat->submit_lock);
	}
	mtx_unlock(&ioat->cleanup_lock);

	if (pending)
		callout_reset(&ioat->poll_timer, 1, ioat_poll_timer_callback,
		    ioat);

	if (completed != 0) {
		ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF);
		wakeup(&ioat->tail);
	}

	if (!is_ioat_halted(comp_update) && !is_ioat_suspended(comp_update))
		return;

	ioat->stats.channel_halts++;

	/*
	 * Fatal programming error on this DMA channel.  Flush any outstanding
	 * work with error status and restart the engine.
	 */
	ioat_log_message(0, "Channel halted due to fatal programming error\n");
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	ioat->quiescing = TRUE;

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_halted_debug(ioat, chanerr);
	ioat->stats.last_halt_chanerr = chanerr;

	while (ioat_get_active(ioat) > 0) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR1(KTR_IOAT, "completing err desc %d", ioat->tail);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg,
			    chanerr_to_errno(chanerr));

		ioat_putn_locked(ioat, 1, IOAT_ACTIVE_DESCR_REF);
		ioat->tail++;
		ioat->stats.descriptors_processed++;
		ioat->stats.descriptors_error++;
	}

	/* Clear error status */
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat_log_message(0, "Resetting channel to recover from error\n");
	error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task);
	KASSERT(error == 0,
	    ("%s: taskqueue_enqueue failed: %d", __func__, error));
}

static void
ioat_reset_hw_task(void *ctx, int pending __unused)
{
	struct ioat_softc *ioat;
	int error;

	ioat = ctx;
	ioat_log_message(1, "%s: Resetting channel\n", __func__);

	error = ioat_reset_hw(ioat);
	KASSERT(error == 0, ("%s: reset failed: %d", __func__, error));
	(void)error;
}

/*
 * User API functions
 */
unsigned
ioat_get_nchannels(void)
{

	return (ioat_channel_index);
}

bus_dmaengine_t
ioat_get_dmaengine(uint32_t index, int flags)
{
	struct ioat_softc *ioat;

	KASSERT((flags & ~(M_NOWAIT | M_WAITOK)) == 0,
	    ("invalid flags: 0x%08x", flags));
	KASSERT((flags & (M_NOWAIT | M_WAITOK)) != (M_NOWAIT | M_WAITOK),
	    ("invalid wait | nowait"));

	if (index >= ioat_channel_index)
		return (NULL);

	ioat = ioat_channel[index];
	if (ioat == NULL || ioat->destroying)
		return (NULL);

	if (ioat->quiescing) {
		if ((flags & M_NOWAIT) != 0)
			return (NULL);

		mtx_lock(IOAT_REFLK);
		while (ioat->quiescing && !ioat->destroying)
			msleep(&ioat->quiescing, IOAT_REFLK, 0, "getdma", 0);
		mtx_unlock(IOAT_REFLK);

		if (ioat->destroying)
			return (NULL);
	}

	/*
	 * There's a race here between the quiescing check and HW reset or
	 * module destroy.
	 */
	return (&ioat_get(ioat, IOAT_DMAENGINE_REF)->dmaengine);
}

void
ioat_put_dmaengine(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	ioat_put(ioat, IOAT_DMAENGINE_REF);
}

int
ioat_get_hwversion(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->version);
}

size_t
ioat_get_max_io_size(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->max_xfer_size);
}

int
ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	if (!ioat->intrdelay_supported)
		return (ENODEV);
	if (delay > ioat->intrdelay_max)
		return (ERANGE);

	ioat_write_2(ioat, IOAT_INTRDELAY_OFFSET, delay);
	ioat->cached_intrdelay =
	    ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_US_MASK;
	return (0);
}

uint16_t
ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->intrdelay_max);
}
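
/*
 * Illustrative consumer sketch for the two functions above (not part of
 * this driver; the 100 us figure is an arbitrary example value):
 *
 *	uint16_t max = ioat_get_max_coalesce_period(dmaengine);
 *	error = ioat_set_interrupt_coalesce(dmaengine, MIN(100, max));
 *
 * ENODEV means the channel lacks INTRDELAY support; ERANGE means the
 * requested delay (in microseconds) exceeds intrdelay_max.
 */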

void
ioat_acquire(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	CTR0(KTR_IOAT, __func__);
}

int
ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags)
{
	struct ioat_softc *ioat;
	int error;

	ioat = to_ioat_softc(dmaengine);
	ioat_acquire(dmaengine);

	error = ioat_reserve_space(ioat, n, mflags);
	if (error != 0)
		ioat_release(dmaengine);
	return (error);
}

void
ioat_release(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR0(KTR_IOAT, __func__);
	ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET, (uint16_t)ioat->hw_head);
	mtx_unlock(&ioat->submit_lock);
}

static struct ioat_descriptor *
ioat_op_generic(struct ioat_softc *ioat, uint8_t op,
    uint32_t size, uint64_t src, uint64_t dst,
    bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_generic_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	int mflags;

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	KASSERT((flags & ~_DMA_GENERIC_FLAGS) == 0,
	    ("Unrecognized flag(s): %#x", flags & ~_DMA_GENERIC_FLAGS));
	if ((flags & DMA_NO_WAIT) != 0)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	if (size > ioat->max_xfer_size) {
		ioat_log_message(0, "%s: max_xfer_size = %d, requested = %u\n",
		    __func__, ioat->max_xfer_size, (unsigned)size);
		return (NULL);
	}

	if (ioat_reserve_space(ioat, 1, mflags) != 0)
		return (NULL);

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = desc->u.generic;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control_generic.op = op;
	hw_desc->u.control_generic.completion_update = 1;

	if ((flags & DMA_INT_EN) != 0)
		hw_desc->u.control_generic.int_enable = 1;
	if ((flags & DMA_FENCE) != 0)
		hw_desc->u.control_generic.fence = 1;

	hw_desc->size = size;
	hw_desc->src_addr = src;
	hw_desc->dest_addr = dst;

	desc->bus_dmadesc.callback_fn = callback_fn;
	desc->bus_dmadesc.callback_arg = callback_arg;
	return (desc);
}

struct bus_dmadesc *
ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.dma;
	hw_desc->u.control.null = 1;
	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
    bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if (((src | dst) & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.dma;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1,
    bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if (((src1 | src2 | dst1 | dst2) & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}
	if (((src1 | src2 | dst1 | dst2) & PAGE_MASK) != 0) {
		ioat_log_message(0, "%s: Addresses must be page-aligned\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, src1, dst1,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.dma;
	if (src2 != src1 + PAGE_SIZE) {
		hw_desc->u.control.src_page_break = 1;
		hw_desc->next_src_addr = src2;
	}
	if (dst2 != dst1 + PAGE_SIZE) {
		hw_desc->u.control.dest_page_break = 1;
		hw_desc->next_dest_addr = dst2;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src,
    bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if ((ioat->capabilities & IOAT_DMACAP_MOVECRC) == 0) {
		ioat_log_message(0, "%s: Device lacks MOVECRC capability\n",
		    __func__);
		return (NULL);
	}
	if (((src | dst) & (0xffffffull << 40)) != 0) {
		ioat_log_message(0, "%s: High 24 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}
	teststore = (flags & _DMA_CRC_TESTSTORE);
	if (teststore == _DMA_CRC_TESTSTORE) {
		ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);
		return (NULL);
	}
	if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
		ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",
		    __func__);
		return (NULL);
	}

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_MOVECRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_MOVECRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_MOVECRC;
		break;
	}

	if ((flags & DMA_CRC_INLINE) == 0 &&
	    (crcptr & (0xffffffull << 40)) != 0) {
		ioat_log_message(0,
		    "%s: High 24 bits of crcptr invalid\n", __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.crc32;

	if ((flags & DMA_CRC_INLINE) == 0)
		hw_desc->crc_address = crcptr;
	else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len,
    uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if ((ioat->capabilities & IOAT_DMACAP_CRC) == 0) {
		ioat_log_message(0, "%s: Device lacks CRC capability\n",
		    __func__);
		return (NULL);
	}
	if ((src & (0xffffffull << 40)) != 0) {
		ioat_log_message(0, "%s: High 24 bits of src invalid\n",
		    __func__);
		return (NULL);
	}
	teststore = (flags & _DMA_CRC_TESTSTORE);
	if (teststore == _DMA_CRC_TESTSTORE) {
		ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);
		return (NULL);
	}
	if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
		ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",
		    __func__);
		return (NULL);
	}

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_CRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_CRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_CRC;
		break;
	}

	if ((flags & DMA_CRC_INLINE) == 0 &&
	    (crcptr & (0xffffffull << 40)) != 0) {
		ioat_log_message(0,
		    "%s: High 24 bits of crcptr invalid\n", __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.crc32;

	if ((flags & DMA_CRC_INLINE) == 0)
		hw_desc->crc_address = crcptr;
	else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern,
    bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_fill_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if ((ioat->capabilities & IOAT_DMACAP_BFILL) == 0) {
		ioat_log_message(0, "%s: Device lacks BFILL capability\n",
		    __func__);
		return (NULL);
	}

	if ((dst & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of dst invalid\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, fillpattern, dst,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.fill;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
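
/*
 * Putting the user API together: an illustrative consumer sketch, not
 * part of this driver.  'mydev_done', 'cookie', 'src', 'dst' and 'len'
 * are hypothetical; src/dst must be device-visible bus addresses.
 *
 *	static void
 *	mydev_done(void *arg, int error)
 *	{
 *		// 'error' is 0, or an errno derived from CHANERR
 *		wakeup(arg);
 *	}
 *
 *	bus_dmaengine_t eng;
 *
 *	eng = ioat_get_dmaengine(0, M_WAITOK);
 *	if (eng != NULL) {
 *		ioat_acquire(eng);
 *		(void)ioat_copy(eng, dst, src, len, mydev_done, &cookie,
 *		    DMA_INT_EN);
 *		ioat_release(eng);	// writes DMACOUNT; work may now run
 *		// ... wait for mydev_done ...
 *		ioat_put_dmaengine(eng);
 *	}
 */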

/*
 * Ring Management
 */
static inline uint32_t
ioat_get_active(struct ioat_softc *ioat)
{

	return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1));
}

static inline uint32_t
ioat_get_ring_space(struct ioat_softc *ioat)
{

	return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1);
}
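
/*
 * Worked example of the modular arithmetic above (illustrative only):
 * with ring_size_order = 2 (a 4-entry ring), head = 5 and tail = 3,
 * ioat_get_active() = (5 - 3) & 3 = 2 descriptors in flight, and
 * ioat_get_ring_space() = 4 - 2 - 1 = 1; one slot is always held back
 * so that head never catches up with tail.
 */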

static struct ioat_descriptor *
ioat_alloc_ring_entry(struct ioat_softc *ioat, int mflags)
{
	struct ioat_generic_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	int error, busdmaflag;

	error = ENOMEM;
	hw_desc = NULL;

	if ((mflags & M_WAITOK) != 0)
		busdmaflag = BUS_DMA_WAITOK;
	else
		busdmaflag = BUS_DMA_NOWAIT;

	desc = malloc(sizeof(*desc), M_IOAT, mflags);
	if (desc == NULL)
		goto out;

	bus_dmamem_alloc(ioat->hw_desc_tag, (void **)&hw_desc,
	    BUS_DMA_ZERO | busdmaflag, &ioat->hw_desc_map);
	if (hw_desc == NULL)
		goto out;

	memset(&desc->bus_dmadesc, 0, sizeof(desc->bus_dmadesc));
	desc->u.generic = hw_desc;

	error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
	    sizeof(*hw_desc), ioat_dmamap_cb, &desc->hw_desc_bus_addr,
	    busdmaflag);
	if (error)
		goto out;

out:
	if (error) {
		ioat_free_ring_entry(ioat, desc);
		return (NULL);
	}
	return (desc);
}

static void
ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc)
{

	if (desc == NULL)
		return;

	if (desc->u.generic)
		bus_dmamem_free(ioat->hw_desc_tag, desc->u.generic,
		    ioat->hw_desc_map);
	free(desc, M_IOAT);
}

/*
 * Reserves space in this IOAT descriptor ring by ensuring enough slots remain
 * for 'num_descs'.
 *
 * If mflags contains M_WAITOK, blocks until enough space is available.
 *
 * Returns zero on success, or an errno on error.  If num_descs is beyond the
 * maximum ring size, returns EINVAL; if allocation would block and mflags
 * contains M_NOWAIT, returns EAGAIN.
 *
 * Must be called with the submit_lock held; returns with the lock held.  The
 * lock may be dropped to allocate the ring.
 *
 * (The submit_lock is needed to add any entries to the ring, so callers are
 * assured enough room is available.)
 */
static int
ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags)
{
	struct ioat_descriptor **new_ring;
	uint32_t order;
	int error;

	mtx_assert(&ioat->submit_lock, MA_OWNED);
	error = 0;

	if (num_descs < 1 || num_descs > (1 << IOAT_MAX_ORDER)) {
		error = EINVAL;
		goto out;
	}
	if (ioat->quiescing) {
		error = ENXIO;
		goto out;
	}

	for (;;) {
		if (ioat_get_ring_space(ioat) >= num_descs)
			goto out;

		order = ioat->ring_size_order;
		if (ioat->is_resize_pending || order == IOAT_MAX_ORDER) {
			if ((mflags & M_WAITOK) != 0) {
				msleep(&ioat->tail, &ioat->submit_lock, 0,
				    "ioat_rsz", 0);
				continue;
			}

			error = EAGAIN;
			break;
		}

		ioat->is_resize_pending = TRUE;
		for (;;) {
			mtx_unlock(&ioat->submit_lock);

			new_ring = ioat_prealloc_ring(ioat, 1 << (order + 1),
			    TRUE, mflags);

			mtx_lock(&ioat->submit_lock);
			KASSERT(ioat->ring_size_order == order,
			    ("is_resize_pending should protect order"));

			if (new_ring == NULL) {
				KASSERT((mflags & M_WAITOK) == 0,
				    ("allocation failed"));
				error = EAGAIN;
				break;
			}

			error = ring_grow(ioat, order, new_ring);
			if (error == 0)
				break;
		}
		ioat->is_resize_pending = FALSE;
		wakeup(&ioat->tail);
		if (error)
			break;
	}

out:
	mtx_assert(&ioat->submit_lock, MA_OWNED);
	return (error);
}

static struct ioat_descriptor **
ioat_prealloc_ring(struct ioat_softc *ioat, uint32_t size, boolean_t need_dscr,
    int mflags)
{
	struct ioat_descriptor **ring;
	uint32_t i;
	int error;

	KASSERT(size > 0 && powerof2(size), ("bogus size"));

	ring = malloc(size * sizeof(*ring), M_IOAT, M_ZERO | mflags);
	if (ring == NULL)
		return (NULL);

	if (need_dscr) {
		error = ENOMEM;
		for (i = size / 2; i < size; i++) {
			ring[i] = ioat_alloc_ring_entry(ioat, mflags);
			if (ring[i] == NULL)
				goto out;
			ring[i]->id = i;
		}
	}
	error = 0;

out:
	if (error != 0 && ring != NULL) {
		ioat_free_ring(ioat, size, ring);
		ring = NULL;
	}
	return (ring);
}

static void
ioat_free_ring(struct ioat_softc *ioat, uint32_t size,
    struct ioat_descriptor **ring)
{
	uint32_t i;

	for (i = 0; i < size; i++) {
		if (ring[i] != NULL)
			ioat_free_ring_entry(ioat, ring[i]);
	}
	free(ring, M_IOAT);
}

static struct ioat_descriptor *
ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
{

	return (ioat->ring[index % (1 << ioat->ring_size_order)]);
}

static int
ring_grow(struct ioat_softc *ioat, uint32_t oldorder,
    struct ioat_descriptor **newring)
{
	struct ioat_descriptor *tmp, *next;
	struct ioat_dma_hw_descriptor *hw;
	uint32_t oldsize, newsize, head, tail, i, end;
	int error;

	CTR0(KTR_IOAT, __func__);

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (oldorder != ioat->ring_size_order || oldorder >= IOAT_MAX_ORDER) {
		error = EINVAL;
		goto out;
	}

	oldsize = (1 << oldorder);
	newsize = (1 << (oldorder + 1));

	mtx_lock(&ioat->cleanup_lock);

	head = ioat->head & (oldsize - 1);
	tail = ioat->tail & (oldsize - 1);

	/* Copy old descriptors to new ring */
	for (i = 0; i < oldsize; i++)
		newring[i] = ioat->ring[i];

	/*
	 * If head has wrapped but tail hasn't, we must swap some descriptors
	 * around so that tail can increment directly to head.
	 */
	if (head < tail) {
		for (i = 0; i <= head; i++) {
			tmp = newring[oldsize + i];

			newring[oldsize + i] = newring[i];
			newring[oldsize + i]->id = oldsize + i;

			newring[i] = tmp;
			newring[i]->id = i;
		}
		head += oldsize;
	}

	KASSERT(head >= tail, ("invariants"));

	/* Head didn't wrap; we only need to link in oldsize..newsize */
	if (head < oldsize) {
		i = oldsize - 1;
		end = newsize;
	} else {
		/* Head did wrap; link newhead..newsize and 0..oldhead */
		i = head;
		end = newsize + (head - oldsize) + 1;
	}

	/*
	 * Fix up hardware ring, being careful not to trample the active
	 * section (tail -> head).
	 */
	for (; i < end; i++) {
		KASSERT((i & (newsize - 1)) < tail ||
		    (i & (newsize - 1)) >= head, ("trampling snake"));

		next = newring[(i + 1) & (newsize - 1)];
		hw = newring[i & (newsize - 1)]->u.dma;
		hw->next = next->hw_desc_bus_addr;
	}

	free(ioat->ring, M_IOAT);
	ioat->ring = newring;
	ioat->ring_size_order = oldorder + 1;
	ioat->tail = tail;
	ioat->head = head;
	error = 0;

	mtx_unlock(&ioat->cleanup_lock);
out:
	if (error)
		ioat_free_ring(ioat, (1 << (oldorder + 1)), newring);
	return (error);
}
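
/*
 * Worked example for ring_grow() above (illustrative): take a 4-entry
 * ring (oldorder = 2) with ioat->tail = 3 and ioat->head = 5, i.e. head
 * has wrapped to slot 1.  The swap loop exchanges slots {0,1} with
 * {4,5} in the new 8-entry ring and bumps head to 5, so the active span
 * tail..head (3..5) no longer wraps.  The relink loop then rewrites the
 * hardware next pointers of the inactive slots only (5, 6, 7, 0, 1),
 * leaving the in-flight descriptors untouched.
 */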

static int
ring_shrink(struct ioat_softc *ioat, uint32_t oldorder,
    struct ioat_descriptor **newring)
{
	struct ioat_dma_hw_descriptor *hw;
	struct ioat_descriptor *ent, *next;
	uint32_t oldsize, newsize, current_idx, new_idx, i;
	int error;

	CTR0(KTR_IOAT, __func__);

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (oldorder != ioat->ring_size_order || oldorder <= IOAT_MIN_ORDER) {
		error = EINVAL;
		goto out_unlocked;
	}

	oldsize = (1 << oldorder);
	newsize = (1 << (oldorder - 1));

	mtx_lock(&ioat->cleanup_lock);

	/* Can't shrink below current active set! */
	if (ioat_get_active(ioat) >= newsize) {
		error = ENOMEM;
		goto out;
	}

	/*
	 * Copy current descriptors to the new ring, dropping the removed
	 * descriptors.
	 */
	for (i = 0; i < newsize; i++) {
		current_idx = (ioat->tail + i) & (oldsize - 1);
		new_idx = (ioat->tail + i) & (newsize - 1);

		newring[new_idx] = ioat->ring[current_idx];
		newring[new_idx]->id = new_idx;
	}

	/* Free deleted descriptors */
	for (i = newsize; i < oldsize; i++) {
		ent = ioat_get_ring_entry(ioat, ioat->tail + i);
		ioat_free_ring_entry(ioat, ent);
	}

	/* Fix up hardware ring. */
	hw = newring[(ioat->tail + newsize - 1) & (newsize - 1)]->u.dma;
	next = newring[(ioat->tail + newsize) & (newsize - 1)];
	hw->next = next->hw_desc_bus_addr;

	free(ioat->ring, M_IOAT);
	ioat->ring = newring;
	ioat->ring_size_order = oldorder - 1;
	error = 0;

out:
	mtx_unlock(&ioat->cleanup_lock);
out_unlocked:
	if (error)
		ioat_free_ring(ioat, (1 << (oldorder - 1)), newring);
	return (error);
}

static void
ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr)
{
	struct ioat_descriptor *desc;

	ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr,
	    IOAT_CHANERR_STR);
	if (chanerr == 0)
		return;

	mtx_assert(&ioat->cleanup_lock, MA_OWNED);

	desc = ioat_get_ring_entry(ioat, ioat->tail + 0);
	dump_descriptor(desc->u.raw);

	desc = ioat_get_ring_entry(ioat, ioat->tail + 1);
	dump_descriptor(desc->u.raw);
}

static void
ioat_poll_timer_callback(void *arg)
{
	struct ioat_softc *ioat;

	ioat = arg;
	ioat_log_message(3, "%s\n", __func__);

	ioat_process_events(ioat);
}

static void
ioat_shrink_timer_callback(void *arg)
{
	struct ioat_descriptor **newring;
	struct ioat_softc *ioat;
	uint32_t order;

	ioat = arg;
	ioat_log_message(1, "%s\n", __func__);

	/* Slowly scale the ring down if idle. */
	mtx_lock(&ioat->submit_lock);

	/* Don't run while the hardware is being reset. */
	if (ioat->resetting) {
		mtx_unlock(&ioat->submit_lock);
		return;
	}

	order = ioat->ring_size_order;
	if (ioat->is_resize_pending || order == IOAT_MIN_ORDER) {
		mtx_unlock(&ioat->submit_lock);
		goto out;
	}
	ioat->is_resize_pending = TRUE;
	mtx_unlock(&ioat->submit_lock);

	newring = ioat_prealloc_ring(ioat, 1 << (order - 1), FALSE,
	    M_NOWAIT);

	mtx_lock(&ioat->submit_lock);
	KASSERT(ioat->ring_size_order == order,
	    ("resize_pending protects order"));

	if (newring != NULL)
		ring_shrink(ioat, order, newring);

	ioat->is_resize_pending = FALSE;
	mtx_unlock(&ioat->submit_lock);

out:
	if (ioat->ring_size_order > IOAT_MIN_ORDER)
		callout_reset(&ioat->shrink_timer, IOAT_SHRINK_PERIOD,
		    ioat_shrink_timer_callback, ioat);
}

/*
 * Support Functions
 */
static void
ioat_submit_single(struct ioat_softc *ioat)
{

	ioat_get(ioat, IOAT_ACTIVE_DESCR_REF);
	atomic_add_rel_int(&ioat->head, 1);
	atomic_add_rel_int(&ioat->hw_head, 1);

	if (!ioat->is_completion_pending) {
		ioat->is_completion_pending = TRUE;
		callout_reset(&ioat->poll_timer, 1, ioat_poll_timer_callback,
		    ioat);
		callout_stop(&ioat->shrink_timer);
	}

	ioat->stats.descriptors_submitted++;
}

static int
ioat_reset_hw(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	unsigned timeout;
	int error;

	mtx_lock(IOAT_REFLK);
	while (ioat->resetting && !ioat->destroying)
		msleep(&ioat->resetting, IOAT_REFLK, 0, "IRH_drain", 0);
	if (ioat->destroying) {
		mtx_unlock(IOAT_REFLK);
		return (ENXIO);
	}
	ioat->resetting = TRUE;

	ioat->quiescing = TRUE;
	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	/*
	 * Suspend ioat_process_events while the hardware and softc are in an
	 * indeterminate state.
	 */
	mtx_lock(&ioat->cleanup_lock);
	ioat->resetting_cleanup = TRUE;
	mtx_unlock(&ioat->cleanup_lock);

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
	    timeout < 20; timeout++) {
		DELAY(1000);
		status = ioat_get_chansts(ioat);
	}
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce"));

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	/*
	 * IOAT v3 workaround - write CHANERRMSK_INT with 3E07h to mask out
	 * errors that can cause stability issues for IOAT v3.
	 */
	pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
	    4);
	chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
	pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);

	/*
	 * BDXDE and BWD models reset MSI-X registers on device reset.
	 * Save/restore their contents manually.
	 */
	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets MSI-X registers; saving\n");
		pci_save_state(ioat->device);
	}

	ioat_reset(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
		DELAY(1000);
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets registers; restored\n");
		pci_restore_state(ioat->device);
	}

	/* Reset attempts to return the hardware to "halted." */
	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		/* So this really shouldn't happen... */
		ioat_log_message(0, "Device is active after a reset?\n");
		ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
		error = 0;
		goto out;
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	if (chanerr != 0) {
		mtx_lock(&ioat->cleanup_lock);
		ioat_halted_debug(ioat, chanerr);
		mtx_unlock(&ioat->cleanup_lock);
		error = EIO;
		goto out;
	}

	/*
	 * Bring device back online after reset.  Writing CHAINADDR brings the
	 * device back to active.
	 *
	 * The internal ring counter resets to zero, so we have to start over
	 * at zero as well.
	 */
	ioat->tail = ioat->head = ioat->hw_head = 0;
	ioat->last_seen = 0;
	*ioat->comp_update = 0;

	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ioat->ring[0]->hw_desc_bus_addr);
	error = 0;

out:
	/*
	 * Resume completions now that ring state is consistent.
	 * ioat_start_channel will add a pending completion and if we are still
	 * blocking completions, we may livelock.
	 */
	mtx_lock(&ioat->cleanup_lock);
	ioat->resetting_cleanup = FALSE;
	mtx_unlock(&ioat->cleanup_lock);

	/* Enqueues a null operation and ensures it completes. */
*/ 1860 if (error == 0) 1861 error = ioat_start_channel(ioat); 1862 1863 /* Unblock submission of new work */ 1864 mtx_lock(IOAT_REFLK); 1865 ioat->quiescing = FALSE; 1866 wakeup(&ioat->quiescing); 1867 1868 ioat->resetting = FALSE; 1869 wakeup(&ioat->resetting); 1870 mtx_unlock(IOAT_REFLK); 1871 1872 return (error); 1873} 1874 1875static int 1876sysctl_handle_chansts(SYSCTL_HANDLER_ARGS) 1877{ 1878 struct ioat_softc *ioat; 1879 struct sbuf sb; 1880 uint64_t status; 1881 int error; 1882 1883 ioat = arg1; 1884 1885 status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS; 1886 1887 sbuf_new_for_sysctl(&sb, NULL, 256, req); 1888 switch (status) { 1889 case IOAT_CHANSTS_ACTIVE: 1890 sbuf_printf(&sb, "ACTIVE"); 1891 break; 1892 case IOAT_CHANSTS_IDLE: 1893 sbuf_printf(&sb, "IDLE"); 1894 break; 1895 case IOAT_CHANSTS_SUSPENDED: 1896 sbuf_printf(&sb, "SUSPENDED"); 1897 break; 1898 case IOAT_CHANSTS_HALTED: 1899 sbuf_printf(&sb, "HALTED"); 1900 break; 1901 case IOAT_CHANSTS_ARMED: 1902 sbuf_printf(&sb, "ARMED"); 1903 break; 1904 default: 1905 sbuf_printf(&sb, "UNKNOWN"); 1906 break; 1907 } 1908 error = sbuf_finish(&sb); 1909 sbuf_delete(&sb); 1910 1911 if (error != 0 || req->newptr == NULL) 1912 return (error); 1913 return (EINVAL); 1914} 1915 1916static int 1917sysctl_handle_dpi(SYSCTL_HANDLER_ARGS) 1918{ 1919 struct ioat_softc *ioat; 1920 struct sbuf sb; 1921#define PRECISION "1" 1922 const uintmax_t factor = 10; 1923 uintmax_t rate; 1924 int error; 1925 1926 ioat = arg1; 1927 sbuf_new_for_sysctl(&sb, NULL, 16, req); 1928 1929 if (ioat->stats.interrupts == 0) { 1930 sbuf_printf(&sb, "NaN"); 1931 goto out; 1932 } 1933 rate = ioat->stats.descriptors_processed * factor / 1934 ioat->stats.interrupts; 1935 sbuf_printf(&sb, "%ju.%." PRECISION "ju", rate / factor, 1936 rate % factor); 1937#undef PRECISION 1938out: 1939 error = sbuf_finish(&sb); 1940 sbuf_delete(&sb); 1941 if (error != 0 || req->newptr == NULL) 1942 return (error); 1943 return (EINVAL); 1944} 1945 1946static int 1947sysctl_handle_error(SYSCTL_HANDLER_ARGS) 1948{ 1949 struct ioat_descriptor *desc; 1950 struct ioat_softc *ioat; 1951 int error, arg; 1952 1953 ioat = arg1; 1954 1955 arg = 0; 1956 error = SYSCTL_OUT(req, &arg, sizeof(arg)); 1957 if (error != 0 || req->newptr == NULL) 1958 return (error); 1959 1960 error = SYSCTL_IN(req, &arg, sizeof(arg)); 1961 if (error != 0) 1962 return (error); 1963 1964 if (arg != 0) { 1965 ioat_acquire(&ioat->dmaengine); 1966 desc = ioat_op_generic(ioat, IOAT_OP_COPY, 1, 1967 0xffff000000000000ull, 0xffff000000000000ull, NULL, NULL, 1968 0); 1969 if (desc == NULL) 1970 error = ENOMEM; 1971 else 1972 ioat_submit_single(ioat); 1973 ioat_release(&ioat->dmaengine); 1974 } 1975 return (error); 1976} 1977 1978static int 1979sysctl_handle_reset(SYSCTL_HANDLER_ARGS) 1980{ 1981 struct ioat_softc *ioat; 1982 int error, arg; 1983 1984 ioat = arg1; 1985 1986 arg = 0; 1987 error = SYSCTL_OUT(req, &arg, sizeof(arg)); 1988 if (error != 0 || req->newptr == NULL) 1989 return (error); 1990 1991 error = SYSCTL_IN(req, &arg, sizeof(arg)); 1992 if (error != 0) 1993 return (error); 1994 1995 if (arg != 0) 1996 error = ioat_reset_hw(ioat); 1997 1998 return (error); 1999} 2000 2001static void 2002dump_descriptor(void *hw_desc) 2003{ 2004 int i, j; 2005 2006 for (i = 0; i < 2; i++) { 2007 for (j = 0; j < 8; j++) 2008 printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]); 2009 printf("\n"); 2010 } 2011} 2012 2013static void 2014ioat_setup_sysctl(device_t device) 2015{ 2016 struct sysctl_oid_list *par, *statpar, *state, *hammer; 2017 
struct sysctl_ctx_list *ctx; 2018 struct sysctl_oid *tree, *tmp; 2019 struct ioat_softc *ioat; 2020 2021 ioat = DEVICE2SOFTC(device); 2022 ctx = device_get_sysctl_ctx(device); 2023 tree = device_get_sysctl_tree(device); 2024 par = SYSCTL_CHILDREN(tree); 2025 2026 SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD, 2027 &ioat->version, 0, "HW version (0xMM form)"); 2028 SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD, 2029 &ioat->max_xfer_size, 0, "HW maximum transfer size"); 2030 SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD, 2031 &ioat->intrdelay_supported, 0, "Is INTRDELAY supported"); 2032 SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD, 2033 &ioat->intrdelay_max, 0, 2034 "Maximum configurable INTRDELAY on this channel (microseconds)"); 2035 2036 tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL, 2037 "IOAT channel internal state"); 2038 state = SYSCTL_CHILDREN(tmp); 2039 2040 SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD, 2041 &ioat->ring_size_order, 0, "SW descriptor ring size order"); 2042 SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head, 2043 0, "SW descriptor head pointer index"); 2044 SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail, 2045 0, "SW descriptor tail pointer index"); 2046 SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "hw_head", CTLFLAG_RD, 2047 &ioat->hw_head, 0, "HW DMACOUNT"); 2048 2049 SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD, 2050 ioat->comp_update, "HW addr of last completion"); 2051 2052 SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_resize_pending", CTLFLAG_RD, 2053 &ioat->is_resize_pending, 0, "resize pending"); 2054 SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_completion_pending", 2055 CTLFLAG_RD, &ioat->is_completion_pending, 0, "completion pending"); 2056 SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_reset_pending", CTLFLAG_RD, 2057 &ioat->is_reset_pending, 0, "reset pending"); 2058 SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_channel_running", CTLFLAG_RD, 2059 &ioat->is_channel_running, 0, "channel running"); 2060 2061 SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts", 2062 CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A", 2063 "String of the channel status"); 2064 2065 SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD, 2066 &ioat->cached_intrdelay, 0, 2067 "Current INTRDELAY on this channel (cached, microseconds)"); 2068 2069 tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer", CTLFLAG_RD, NULL, 2070 "Big hammers (mostly for testing)"); 2071 hammer = SYSCTL_CHILDREN(tmp); 2072 2073 SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset", 2074 CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I", 2075 "Set to non-zero to reset the hardware"); 2076 SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_error", 2077 CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_error, "I", 2078 "Set to non-zero to inject a recoverable hardware error"); 2079 2080 tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL, 2081 "IOAT channel statistics"); 2082 statpar = SYSCTL_CHILDREN(tmp); 2083 2084 SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts", CTLFLAG_RW, 2085 &ioat->stats.interrupts, 2086 "Number of interrupts processed on this channel"); 2087 SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors", CTLFLAG_RW, 2088 &ioat->stats.descriptors_processed, 2089 "Number of descriptors processed on this channel"); 2090 SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted", CTLFLAG_RW, 2091 
	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL,
	    "IOAT channel statistics");
	statpar = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts", CTLFLAG_RW,
	    &ioat->stats.interrupts,
	    "Number of interrupts processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors", CTLFLAG_RW,
	    &ioat->stats.descriptors_processed,
	    "Number of descriptors processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted", CTLFLAG_RW,
	    &ioat->stats.descriptors_submitted,
	    "Number of descriptors submitted to this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored", CTLFLAG_RW,
	    &ioat->stats.descriptors_error,
	    "Number of descriptors failed by channel errors");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts", CTLFLAG_RW,
	    &ioat->stats.channel_halts, 0,
	    "Number of times the channel has halted");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr", CTLFLAG_RW,
	    &ioat->stats.last_halt_chanerr, 0,
	    "The raw CHANERR when the channel was last halted");

	SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A",
	    "Descriptors per interrupt");
}

static inline struct ioat_softc *
ioat_get(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{
	uint32_t old;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	old = atomic_fetchadd_32(&ioat->refcnt, 1);
	KASSERT(old < UINT32_MAX, ("refcnt overflow"));

#ifdef INVARIANTS
	old = atomic_fetchadd_32(&ioat->refkinds[kind], 1);
	KASSERT(old < UINT32_MAX, ("refcnt kind overflow"));
#endif

	return (ioat);
}

static inline void
ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
{

	_ioat_putn(ioat, n, kind, FALSE);
}

static inline void
ioat_putn_locked(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
{

	_ioat_putn(ioat, n, kind, TRUE);
}
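
/*
 * Drop "n" references of the given kind.  Sketch of the scheme: the common
 * case, where the count stays above zero, is handled with a lock-free
 * compare-and-swap loop; only a drop that may reach zero takes IOAT_REFLK,
 * so the final release can wake any thread sleeping in ioat_drain_locked().
 * "locked" indicates whether the caller already holds IOAT_REFLK.
 */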
static inline void
_ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind,
    boolean_t locked)
{
	uint32_t old;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	if (n == 0)
		return;

#ifdef INVARIANTS
	old = atomic_fetchadd_32(&ioat->refkinds[kind], -n);
	KASSERT(old >= n, ("refcnt kind underflow"));
#endif

	/* Skip acquiring the lock if resulting refcnt > 0. */
	for (;;) {
		old = ioat->refcnt;
		if (old <= n)
			break;
		if (atomic_cmpset_32(&ioat->refcnt, old, old - n))
			return;
	}

	if (locked)
		mtx_assert(IOAT_REFLK, MA_OWNED);
	else
		mtx_lock(IOAT_REFLK);

	old = atomic_fetchadd_32(&ioat->refcnt, -n);
	KASSERT(old >= n, ("refcnt error"));

	if (old == n)
		wakeup(IOAT_REFLK);
	if (!locked)
		mtx_unlock(IOAT_REFLK);
}

static inline void
ioat_put(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{

	ioat_putn(ioat, 1, kind);
}

static void
ioat_drain_locked(struct ioat_softc *ioat)
{

	mtx_assert(IOAT_REFLK, MA_OWNED);
	while (ioat->refcnt > 0)
		msleep(IOAT_REFLK, IOAT_REFLK, 0, "ioat_drain", 0);
}
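
/*
 * DDB support.  From the in-kernel debugger, channel state can be
 * inspected with, e.g., "show ioat 0"; the argument is the channel index
 * and is validated against ioat_channel_index below.
 */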
%lu\n", sc->stats.interrupts); 2265 db_printf(" descriptors_processed: %lu\n", sc->stats.descriptors_processed); 2266 db_printf(" descriptors_error: %lu\n", sc->stats.descriptors_error); 2267 db_printf(" descriptors_submitted: %lu\n", sc->stats.descriptors_submitted); 2268 2269 db_printf(" channel_halts: %u\n", sc->stats.channel_halts); 2270 db_printf(" last_halt_chanerr: %u\n", sc->stats.last_halt_chanerr); 2271 2272 if (db_pager_quit) 2273 return; 2274 2275 db_printf(" hw status:\n"); 2276 db_printf(" status: 0x%lx\n", ioat_get_chansts(sc)); 2277 db_printf(" chanctrl: 0x%x\n", 2278 (unsigned)ioat_read_2(sc, IOAT_CHANCTRL_OFFSET)); 2279 db_printf(" chancmd: 0x%x\n", 2280 (unsigned)ioat_read_1(sc, IOAT_CHANCMD_OFFSET)); 2281 db_printf(" dmacount: 0x%x\n", 2282 (unsigned)ioat_read_2(sc, IOAT_DMACOUNT_OFFSET)); 2283 db_printf(" chainaddr: 0x%lx\n", 2284 ioat_read_double_4(sc, IOAT_CHAINADDR_OFFSET_LOW)); 2285 db_printf(" chancmp: 0x%lx\n", 2286 ioat_read_double_4(sc, IOAT_CHANCMP_OFFSET_LOW)); 2287 db_printf(" chanerr: %b\n", 2288 (int)ioat_read_4(sc, IOAT_CHANERR_OFFSET), IOAT_CHANERR_STR); 2289 return; 2290usage: 2291 db_printf("usage: show ioat <0-%u>\n", ioat_channel_index); 2292 return; 2293} 2294#endif /* DDB */ 2295