/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 * Copyright (C) 2018 Alexander Motin <mav@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/fail.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include "ioat.h"
#include "ioat_hw.h"
#include "ioat_internal.h"

#ifndef	BUS_SPACE_MAXADDR_40BIT
#define	BUS_SPACE_MAXADDR_40BIT	MIN(BUS_SPACE_MAXADDR, 0xFFFFFFFFFFULL)
#endif
#ifndef	BUS_SPACE_MAXADDR_46BIT
#define	BUS_SPACE_MAXADDR_46BIT	MIN(BUS_SPACE_MAXADDR, 0x3FFFFFFFFFFFULL)
#endif

static int ioat_probe(device_t device);
static int ioat_attach(device_t device);
static int ioat_detach(device_t device);
static int ioat_setup_intr(struct ioat_softc *ioat);
static int ioat_teardown_intr(struct ioat_softc *ioat);
static int ioat3_attach(device_t device);
static int ioat_start_channel(struct ioat_softc *ioat);
static int ioat_map_pci_bar(struct ioat_softc *ioat);
static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void ioat_interrupt_handler(void *arg);
static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
static int chanerr_to_errno(uint32_t);
static void ioat_process_events(struct ioat_softc *ioat, boolean_t intr);
static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
static void ioat_free_ring(struct ioat_softc *, uint32_t size,
    struct ioat_descriptor *);
static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags);
static union ioat_hw_descriptor *ioat_get_descriptor(struct ioat_softc *,
    uint32_t index);
static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *,
    uint32_t index);
static void ioat_halted_debug(struct ioat_softc *, uint32_t);
static void ioat_poll_timer_callback(void *arg);
static void dump_descriptor(void *hw_desc);
static void ioat_submit_single(struct ioat_softc *ioat);
static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
    int error);
static int ioat_reset_hw(struct ioat_softc *ioat);
static void ioat_reset_hw_task(void *, int);
static void ioat_setup_sysctl(device_t device);
static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS);
static void ioat_get(struct ioat_softc *);
static void ioat_put(struct ioat_softc *);
static void ioat_drain_locked(struct ioat_softc *);

#define	ioat_log_message(v, ...) do {					\
	if ((v) <= g_ioat_debug_level) {				\
		device_printf(ioat->device, __VA_ARGS__);		\
	}								\
} while (0)

MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node");

static int g_force_legacy_interrupts;
SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN,
    &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled");

int g_ioat_debug_level = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level,
    0, "Set log level (0-3) for ioat(4). Higher is more verbose.");

unsigned g_ioat_ring_order = 13;
SYSCTL_UINT(_hw_ioat, OID_AUTO, ring_order, CTLFLAG_RDTUN, &g_ioat_ring_order,
    0, "Set IOAT ring order. (1 << this) == ring size.");

/*
 * OS <-> Driver interface structures
 */
static device_method_t ioat_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ioat_probe),
	DEVMETHOD(device_attach, ioat_attach),
	DEVMETHOD(device_detach, ioat_detach),
	DEVMETHOD_END
};

static driver_t ioat_pci_driver = {
	"ioat",
	ioat_pci_methods,
	sizeof(struct ioat_softc),
};

static devclass_t ioat_devclass;
DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0);
MODULE_VERSION(ioat, 1);

/*
 * Private data structures
 */
static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS];
static unsigned ioat_channel_index = 0;
SYSCTL_UINT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0,
    "Number of IOAT channels attached");
static struct mtx ioat_list_mtx;
MTX_SYSINIT(ioat_list_mtx, &ioat_list_mtx, "ioat list mtx", MTX_DEF);

static struct _pcsid
{
	u_int32_t	type;
	const char	*desc;
} pci_ids[] = {
	{ 0x34308086, "TBG IOAT Ch0" },
	{ 0x34318086, "TBG IOAT Ch1" },
	{ 0x34328086, "TBG IOAT Ch2" },
	{ 0x34338086, "TBG IOAT Ch3" },
	{ 0x34298086, "TBG IOAT Ch4" },
	{ 0x342a8086, "TBG IOAT Ch5" },
	{ 0x342b8086, "TBG IOAT Ch6" },
	{ 0x342c8086, "TBG IOAT Ch7" },

	{ 0x37108086, "JSF IOAT Ch0" },
	{ 0x37118086, "JSF IOAT Ch1" },
	{ 0x37128086, "JSF IOAT Ch2" },
	{ 0x37138086, "JSF IOAT Ch3" },
	{ 0x37148086, "JSF IOAT Ch4" },
	{ 0x37158086, "JSF IOAT Ch5" },
	{ 0x37168086, "JSF IOAT Ch6" },
	{ 0x37178086, "JSF IOAT Ch7" },
	{ 0x37188086, "JSF IOAT Ch0 (RAID)" },
	{ 0x37198086, "JSF IOAT Ch1 (RAID)" },

	{ 0x3c208086, "SNB IOAT Ch0" },
	{ 0x3c218086, "SNB IOAT Ch1" },
	{ 0x3c228086, "SNB IOAT Ch2" },
	{ 0x3c238086, "SNB IOAT Ch3" },
	{ 0x3c248086, "SNB IOAT Ch4" },
	{ 0x3c258086, "SNB IOAT Ch5" },
	{ 0x3c268086, "SNB IOAT Ch6" },
	{ 0x3c278086, "SNB IOAT Ch7" },
	{ 0x3c2e8086, "SNB IOAT Ch0 (RAID)" },
	{ 0x3c2f8086, "SNB IOAT Ch1 (RAID)" },

	{ 0x0e208086, "IVB IOAT Ch0" },
	{ 0x0e218086, "IVB IOAT Ch1" },
	{ 0x0e228086, "IVB IOAT Ch2" },
	{ 0x0e238086, "IVB IOAT Ch3" },
	{ 0x0e248086, "IVB IOAT Ch4" },
	{ 0x0e258086, "IVB IOAT Ch5" },
	{ 0x0e268086, "IVB IOAT Ch6" },
	{ 0x0e278086, "IVB IOAT Ch7" },
	{ 0x0e2e8086, "IVB IOAT Ch0 (RAID)" },
	{ 0x0e2f8086, "IVB IOAT Ch1 (RAID)" },

	{ 0x2f208086, "HSW IOAT Ch0" },
	{ 0x2f218086, "HSW IOAT Ch1" },
	{ 0x2f228086, "HSW IOAT Ch2" },
	{ 0x2f238086, "HSW IOAT Ch3" },
	{ 0x2f248086, "HSW IOAT Ch4" },
	{ 0x2f258086, "HSW IOAT Ch5" },
	{ 0x2f268086, "HSW IOAT Ch6" },
	{ 0x2f278086, "HSW IOAT Ch7" },
	{ 0x2f2e8086, "HSW IOAT Ch0 (RAID)" },
	{ 0x2f2f8086, "HSW IOAT Ch1 (RAID)" },

	{ 0x0c508086, "BWD IOAT Ch0" },
	{ 0x0c518086, "BWD IOAT Ch1" },
	{ 0x0c528086, "BWD IOAT Ch2" },
	{ 0x0c538086, "BWD IOAT Ch3" },

	{ 0x6f508086, "BDXDE IOAT Ch0" },
	{ 0x6f518086, "BDXDE IOAT Ch1" },
	{ 0x6f528086, "BDXDE IOAT Ch2" },
	{ 0x6f538086, "BDXDE IOAT Ch3" },

	{ 0x6f208086, "BDX IOAT Ch0" },
	{ 0x6f218086, "BDX IOAT Ch1" },
	{ 0x6f228086, "BDX IOAT Ch2" },
	{ 0x6f238086, "BDX IOAT Ch3" },
	{ 0x6f248086, "BDX IOAT Ch4" },
	{ 0x6f258086, "BDX IOAT Ch5" },
	{ 0x6f268086, "BDX IOAT Ch6" },
	{ 0x6f278086, "BDX IOAT Ch7" },
	{ 0x6f2e8086, "BDX IOAT Ch0 (RAID)" },
	{ 0x6f2f8086, "BDX IOAT Ch1 (RAID)" },

	{ 0x20218086, "SKX IOAT" },
};

MODULE_PNP_INFO("W32:vendor/device;D:#", pci, ioat, pci_ids,
    nitems(pci_ids));

/*
 * OS <-> Driver linkage functions
 */
static int
ioat_probe(device_t device)
{
	struct _pcsid *ep;
	u_int32_t type;

	type = pci_get_devid(device);
	for (ep = pci_ids; ep < &pci_ids[nitems(pci_ids)]; ep++) {
		if (ep->type == type) {
			device_set_desc(device, ep->desc);
			return (0);
		}
	}
	return (ENXIO);
}

static int
ioat_attach(device_t device)
{
	struct ioat_softc *ioat;
	int error, i;

	ioat = DEVICE2SOFTC(device);
	ioat->device = device;
	if (bus_get_domain(device, &ioat->domain) != 0)
		ioat->domain = 0;
	ioat->cpu = CPU_FFS(&cpuset_domain[ioat->domain]) - 1;
	if (ioat->cpu < 0)
		ioat->cpu = CPU_FIRST();

	error = ioat_map_pci_bar(ioat);
	if (error != 0)
		goto err;

	ioat->version = ioat_read_cbver(ioat);
	if (ioat->version < IOAT_VER_3_0) {
		error = ENODEV;
		goto err;
	}

	error = ioat3_attach(device);
	if (error != 0)
		goto err;

	error = pci_enable_busmaster(device);
	if (error != 0)
		goto err;

	error = ioat_setup_intr(ioat);
	if (error != 0)
		goto err;

	error = ioat_reset_hw(ioat);
	if (error != 0)
		goto err;

	ioat_process_events(ioat, FALSE);
	ioat_setup_sysctl(device);

	mtx_lock(&ioat_list_mtx);
	for (i = 0; i < IOAT_MAX_CHANNELS; i++) {
		if (ioat_channel[i] == NULL)
			break;
	}
	if (i >= IOAT_MAX_CHANNELS) {
		mtx_unlock(&ioat_list_mtx);
		device_printf(device, "Too many I/OAT devices in system\n");
		error = ENXIO;
		goto err;
	}
	ioat->chan_idx = i;
	ioat_channel[i] = ioat;
	if (i >= ioat_channel_index)
		ioat_channel_index = i + 1;
	mtx_unlock(&ioat_list_mtx);

	ioat_test_attach();
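
	/*
	 * Fall through: on success error is 0 and the err: path below is a
	 * no-op; on failure ioat_detach() unwinds any partially completed
	 * setup.
	 */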
err:
	if (error != 0)
		ioat_detach(device);
	return (error);
}

static inline int
ioat_bus_dmamap_destroy(struct ioat_softc *ioat, const char *func,
    bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int error;

	error = bus_dmamap_destroy(dmat, map);
	if (error != 0) {
		ioat_log_message(0,
		    "%s: bus_dmamap_destroy failed %d\n", func, error);
	}

	return (error);
}

static int
ioat_detach(device_t device)
{
	struct ioat_softc *ioat;
	int i, error;

	ioat = DEVICE2SOFTC(device);

	mtx_lock(&ioat_list_mtx);
	ioat_channel[ioat->chan_idx] = NULL;
	while (ioat_channel_index > 0 &&
	    ioat_channel[ioat_channel_index - 1] == NULL)
		ioat_channel_index--;
	mtx_unlock(&ioat_list_mtx);

	ioat_test_detach();
	taskqueue_drain(taskqueue_thread, &ioat->reset_task);

	mtx_lock(&ioat->submit_lock);
	ioat->quiescing = TRUE;
	ioat->destroying = TRUE;
	wakeup(&ioat->quiescing);
	wakeup(&ioat->resetting);

	ioat_drain_locked(ioat);
	mtx_unlock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	while (ioat_get_active(ioat) > 0)
		msleep(&ioat->tail, &ioat->cleanup_lock, 0, "ioat_drain", 1);
	mtx_unlock(&ioat->cleanup_lock);

	ioat_teardown_intr(ioat);
	callout_drain(&ioat->poll_timer);

	pci_disable_busmaster(device);

	if (ioat->pci_resource != NULL)
		bus_release_resource(device, SYS_RES_MEMORY,
		    ioat->pci_resource_id, ioat->pci_resource);

	if (ioat->data_tag != NULL) {
		for (i = 0; i < 1 << ioat->ring_size_order; i++) {
			error = ioat_bus_dmamap_destroy(ioat, __func__,
			    ioat->data_tag, ioat->ring[i].src_dmamap);
			if (error != 0)
				return (error);
		}
		for (i = 0; i < 1 << ioat->ring_size_order; i++) {
			error = ioat_bus_dmamap_destroy(ioat, __func__,
			    ioat->data_tag, ioat->ring[i].dst_dmamap);
			if (error != 0)
				return (error);
		}

		for (i = 0; i < 1 << ioat->ring_size_order; i++) {
			error = ioat_bus_dmamap_destroy(ioat, __func__,
			    ioat->data_tag, ioat->ring[i].src2_dmamap);
			if (error != 0)
				return (error);
		}
		for (i = 0; i < 1 << ioat->ring_size_order; i++) {
			error = ioat_bus_dmamap_destroy(ioat, __func__,
			    ioat->data_tag, ioat->ring[i].dst2_dmamap);
			if (error != 0)
				return (error);
		}

		bus_dma_tag_destroy(ioat->data_tag);
	}

	if (ioat->ring != NULL)
		ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring);

	if (ioat->comp_update != NULL) {
		bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
		bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
		    ioat->comp_update_map);
		bus_dma_tag_destroy(ioat->comp_update_tag);
	}

	if (ioat->hw_desc_ring != NULL) {
		bus_dmamap_unload(ioat->hw_desc_tag, ioat->hw_desc_map);
		bus_dmamem_free(ioat->hw_desc_tag, ioat->hw_desc_ring,
		    ioat->hw_desc_map);
		bus_dma_tag_destroy(ioat->hw_desc_tag);
	}

	return (0);
}

static int
ioat_teardown_intr(struct ioat_softc *ioat)
{

	if (ioat->tag != NULL)
		bus_teardown_intr(ioat->device, ioat->res, ioat->tag);

	if (ioat->res != NULL)
		bus_release_resource(ioat->device, SYS_RES_IRQ,
		    rman_get_rid(ioat->res), ioat->res);

	pci_release_msi(ioat->device);
	return (0);
}
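
/*
 * Start the channel by hand-submitting a NULL descriptor (an 8-byte no-op
 * COPY with the null bit set) and polling, for up to roughly 100us, for the
 * channel to report idle.
 */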
static int
ioat_start_channel(struct ioat_softc *ioat)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t status;
	uint32_t chanerr;
	int i;

	ioat_acquire(&ioat->dmaengine);

	/* Submit 'NULL' operation manually to avoid quiescing flag */
	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = &ioat_get_descriptor(ioat, ioat->head)->dma;
	dmadesc = &desc->bus_dmadesc;

	dmadesc->callback_fn = NULL;
	dmadesc->callback_arg = NULL;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control_generic.op = IOAT_OP_COPY;
	hw_desc->u.control_generic.completion_update = 1;
	hw_desc->size = 8;
	hw_desc->src_addr = 0;
	hw_desc->dest_addr = 0;
	hw_desc->u.control.null = 1;

	ioat_submit_single(ioat);
	ioat_release(&ioat->dmaengine);

	for (i = 0; i < 100; i++) {
		DELAY(1);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status))
			return (0);
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_log_message(0, "could not start channel: "
	    "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr,
	    IOAT_CHANERR_STR);
	return (ENXIO);
}

/*
 * Initialize Hardware
 */
static int
ioat3_attach(device_t device)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor *ring;
	struct ioat_dma_hw_descriptor *dma_hw_desc;
	void *hw_desc;
	bus_addr_t lowaddr;
	size_t ringsz;
	int i, num_descriptors;
	int error;
	uint8_t xfercap;

	error = 0;
	ioat = DEVICE2SOFTC(device);
	ioat->capabilities = ioat_read_dmacapability(ioat);

	ioat_log_message(0, "Capabilities: %b\n", (int)ioat->capabilities,
	    IOAT_DMACAP_STR);

	xfercap = ioat_read_xfercap(ioat);
	ioat->max_xfer_size = 1 << xfercap;

	ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) &
	    IOAT_INTRDELAY_SUPPORTED) != 0;
	if (ioat->intrdelay_supported)
		ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK;

	/* TODO: need to check DCA here if we ever do XOR/PQ */

	mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
	mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF);
	callout_init(&ioat->poll_timer, 1);
	TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat);

	/* Establish lock order for Witness */
	mtx_lock(&ioat->cleanup_lock);
	mtx_lock(&ioat->submit_lock);
	mtx_unlock(&ioat->submit_lock);
	mtx_unlock(&ioat->cleanup_lock);

	ioat->is_submitter_processing = FALSE;

	if (ioat->version >= IOAT_VER_3_3)
		lowaddr = BUS_SPACE_MAXADDR_48BIT;
	else if (ioat->version >= IOAT_VER_3_2)
		lowaddr = BUS_SPACE_MAXADDR_46BIT;
	else
		lowaddr = BUS_SPACE_MAXADDR_40BIT;
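
	/*
	 * comp_update is a single uint64_t completion write-back area: the
	 * hardware DMAs channel status (including the bus address of the
	 * last completed descriptor) into it, so it gets its own tag and a
	 * permanently loaded map.
	 */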
	error = bus_dma_tag_create(bus_get_dma_tag(ioat->device),
	    sizeof(uint64_t), 0x0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
	    &ioat->comp_update_tag);
	if (error != 0)
		return (error);

	error = bus_dmamem_alloc(ioat->comp_update_tag,
	    (void **)&ioat->comp_update, BUS_DMA_ZERO | BUS_DMA_WAITOK,
	    &ioat->comp_update_map);
	if (error != 0)
		return (error);

	error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
	    ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);

	ioat->ring_size_order = g_ioat_ring_order;
	num_descriptors = 1 << ioat->ring_size_order;
	ringsz = sizeof(struct ioat_dma_hw_descriptor) * num_descriptors;

	error = bus_dma_tag_create(bus_get_dma_tag(ioat->device),
	    2 * 1024 * 1024, 0x0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
	    ringsz, 1, ringsz, 0, NULL, NULL, &ioat->hw_desc_tag);
	if (error != 0)
		return (error);

	error = bus_dmamem_alloc(ioat->hw_desc_tag, &hw_desc,
	    BUS_DMA_ZERO | BUS_DMA_WAITOK, &ioat->hw_desc_map);
	if (error != 0)
		return (error);

	error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
	    ringsz, ioat_dmamap_cb, &ioat->hw_desc_bus_addr, BUS_DMA_NOWAIT);
	if (error)
		return (error);

	ioat->hw_desc_ring = hw_desc;

	error = bus_dma_tag_create(bus_get_dma_tag(ioat->device),
	    1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
	    ioat->max_xfer_size, 1, ioat->max_xfer_size, 0, NULL, NULL,
	    &ioat->data_tag);
	if (error != 0)
		return (error);
	ioat->ring = malloc_domainset(num_descriptors * sizeof(*ring), M_IOAT,
	    DOMAINSET_PREF(ioat->domain), M_ZERO | M_WAITOK);

	ring = ioat->ring;
	for (i = 0; i < num_descriptors; i++) {
		memset(&ring[i].bus_dmadesc, 0, sizeof(ring[i].bus_dmadesc));
		ring[i].id = i;
		error = bus_dmamap_create(ioat->data_tag, 0,
		    &ring[i].src_dmamap);
		if (error != 0) {
			ioat_log_message(0,
			    "%s: bus_dmamap_create failed %d\n", __func__,
			    error);
			return (error);
		}
		error = bus_dmamap_create(ioat->data_tag, 0,
		    &ring[i].dst_dmamap);
		if (error != 0) {
			ioat_log_message(0,
			    "%s: bus_dmamap_create failed %d\n", __func__,
			    error);
			return (error);
		}
		error = bus_dmamap_create(ioat->data_tag, 0,
		    &ring[i].src2_dmamap);
		if (error != 0) {
			ioat_log_message(0,
			    "%s: bus_dmamap_create failed %d\n", __func__,
			    error);
			return (error);
		}
		error = bus_dmamap_create(ioat->data_tag, 0,
		    &ring[i].dst2_dmamap);
		if (error != 0) {
			ioat_log_message(0,
			    "%s: bus_dmamap_create failed %d\n", __func__,
			    error);
			return (error);
		}
	}

	for (i = 0; i < num_descriptors; i++) {
		dma_hw_desc = &ioat->hw_desc_ring[i].dma;
		dma_hw_desc->next = RING_PHYS_ADDR(ioat, i + 1);
	}

	ioat->tail = ioat->head = 0;
	*ioat->comp_update = ioat->last_seen =
	    RING_PHYS_ADDR(ioat, ioat->tail - 1);
	return (0);
}

static int
ioat_map_pci_bar(struct ioat_softc *ioat)
{

	ioat->pci_resource_id = PCIR_BAR(0);
	ioat->pci_resource = bus_alloc_resource_any(ioat->device,
	    SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE);

	if (ioat->pci_resource == NULL) {
		ioat_log_message(0, "unable to allocate pci resource\n");
		return (ENODEV);
	}

	ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
	ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);
	return (0);
}

static void
ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ioat_softc *ioat = arg;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	ioat->comp_update_bus_addr = seg[0].ds_addr;
}

static void
ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	baddr = arg;
	*baddr = segs->ds_addr;
}

/*
 * Interrupt setup and handlers
 */
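/*
 * A single MSI-X vector is used when available; the
 * hw.ioat.force_legacy_interrupts tunable forces the shared legacy INTx
 * path instead.
 */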
static int
ioat_setup_intr(struct ioat_softc *ioat)
{
	uint32_t num_vectors;
	int error;
	boolean_t use_msix;
	boolean_t force_legacy_interrupts;

	use_msix = FALSE;
	force_legacy_interrupts = FALSE;

	if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
		num_vectors = 1;
		pci_alloc_msix(ioat->device, &num_vectors);
		if (num_vectors == 1)
			use_msix = TRUE;
	}

	if (use_msix) {
		ioat->rid = 1;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_ACTIVE);
	} else {
		ioat->rid = 0;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_SHAREABLE | RF_ACTIVE);
	}
	if (ioat->res == NULL) {
		ioat_log_message(0, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ioat->tag = NULL;
	error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
	    INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
	if (error != 0) {
		ioat_log_message(0, "bus_setup_intr failed\n");
		return (error);
	}

	ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);
	return (0);
}

static boolean_t
ioat_model_resets_msix(struct ioat_softc *ioat)
{
	u_int32_t pciid;

	pciid = pci_get_devid(ioat->device);
	switch (pciid) {
	/* BWD: */
	case 0x0c508086:
	case 0x0c518086:
	case 0x0c528086:
	case 0x0c538086:
	/* BDXDE: */
	case 0x6f508086:
	case 0x6f518086:
	case 0x6f528086:
	case 0x6f538086:
		return (TRUE);
	}

	return (FALSE);
}

static void
ioat_interrupt_handler(void *arg)
{
	struct ioat_softc *ioat = arg;

	ioat->stats.interrupts++;
	ioat_process_events(ioat, TRUE);
}

static int
chanerr_to_errno(uint32_t chanerr)
{

	if (chanerr == 0)
		return (0);
	if ((chanerr & (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0)
		return (EFAULT);
	if ((chanerr & (IOAT_CHANERR_RDERR | IOAT_CHANERR_WDERR)) != 0)
		return (EIO);
	/* This one is probably our fault: */
	if ((chanerr & IOAT_CHANERR_NDADDERR) != 0)
		return (EIO);
	return (EIO);
}
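
/*
 * Process completed descriptors.  The hardware writes the bus address of
 * the last completed hardware descriptor into *comp_update; we advance
 * tail, unloading DMA maps and firing callbacks, until
 * RING_PHYS_ADDR(tail - 1) matches that address.  Channel halt/suspend
 * recovery is handled at the end.
 */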
static void
ioat_process_events(struct ioat_softc *ioat, boolean_t intr)
{
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t comp_update, status;
	uint32_t completed, chanerr;
	int error;

	if (intr) {
		mtx_lock(&ioat->cleanup_lock);
	} else {
		if (!mtx_trylock(&ioat->cleanup_lock))
			return;
	}

	/*
	 * Don't run while the hardware is being reset.  Reset is responsible
	 * for blocking new work and draining & completing existing work, so
	 * there is nothing to do until new work is queued after reset anyway.
	 */
	if (ioat->resetting_cleanup) {
		mtx_unlock(&ioat->cleanup_lock);
		return;
	}

	completed = 0;
	comp_update = *ioat->comp_update;
	status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	if (status < ioat->hw_desc_bus_addr ||
	    status >= ioat->hw_desc_bus_addr + (1 << ioat->ring_size_order) *
	    sizeof(struct ioat_generic_hw_descriptor))
		panic("Bogus completion address %jx (channel %u)",
		    (uintmax_t)status, ioat->chan_idx);

	if (status == ioat->last_seen) {
		/*
		 * If we landed in process_events and nothing has been
		 * completed, check for a timeout due to channel halt.
		 */
		goto out;
	}
	CTR4(KTR_IOAT, "%s channel=%u hw_status=0x%lx last_seen=0x%lx",
	    __func__, ioat->chan_idx, comp_update, ioat->last_seen);

	while (RING_PHYS_ADDR(ioat, ioat->tail - 1) != status) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR5(KTR_IOAT, "channel=%u completing desc idx %u (%p) ok cb %p(%p)",
		    ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn,
		    dmadesc->callback_arg);

		bus_dmamap_unload(ioat->data_tag, desc->src_dmamap);
		bus_dmamap_unload(ioat->data_tag, desc->dst_dmamap);
		bus_dmamap_unload(ioat->data_tag, desc->src2_dmamap);
		bus_dmamap_unload(ioat->data_tag, desc->dst2_dmamap);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg, 0);

		completed++;
		ioat->tail++;
	}
	CTR5(KTR_IOAT, "%s channel=%u head=%u tail=%u active=%u", __func__,
	    ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat));

	if (completed != 0) {
		ioat->last_seen = RING_PHYS_ADDR(ioat, ioat->tail - 1);
		ioat->stats.descriptors_processed += completed;
		wakeup(&ioat->tail);
	}

out:
	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	mtx_unlock(&ioat->cleanup_lock);

	/*
	 * The device doesn't seem to reliably push suspend/halt statuses to
	 * the channel completion memory address, so poll the device register
	 * here.  For performance reasons skip it on interrupts, do it only
	 * on much more rare polling events.
	 */
	if (!intr)
		comp_update = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;
	if (!is_ioat_halted(comp_update) && !is_ioat_suspended(comp_update))
		return;

	ioat->stats.channel_halts++;

	/*
	 * Fatal programming error on this DMA channel.  Flush any outstanding
	 * work with error status and restart the engine.
	 */
	mtx_lock(&ioat->submit_lock);
	ioat->quiescing = TRUE;
	mtx_unlock(&ioat->submit_lock);

	/*
	 * This is safe to do here because the submit queue is quiesced.  We
	 * know that we will drain all outstanding events, so ioat_reset_hw
	 * can't deadlock.  It is necessary to protect other ioat_process_event
	 * threads from racing ioat_reset_hw, reading an indeterminate hw
	 * state, and attempting to continue issuing completions.
	 */
	mtx_lock(&ioat->cleanup_lock);
	ioat->resetting_cleanup = TRUE;

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	if (1 <= g_ioat_debug_level)
		ioat_halted_debug(ioat, chanerr);
	ioat->stats.last_halt_chanerr = chanerr;

	while (ioat_get_active(ioat) > 0) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR5(KTR_IOAT, "channel=%u completing desc idx %u (%p) err cb %p(%p)",
		    ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn,
		    dmadesc->callback_arg);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg,
			    chanerr_to_errno(chanerr));

		ioat->tail++;
		ioat->stats.descriptors_processed++;
		ioat->stats.descriptors_error++;
	}
	CTR5(KTR_IOAT, "%s channel=%u head=%u tail=%u active=%u", __func__,
	    ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat));

	/* Clear error status */
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	mtx_unlock(&ioat->cleanup_lock);

	ioat_log_message(0, "Resetting channel to recover from error\n");
	error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task);
	KASSERT(error == 0,
	    ("%s: taskqueue_enqueue failed: %d", __func__, error));
}

static void
ioat_reset_hw_task(void *ctx, int pending __unused)
{
	struct ioat_softc *ioat;
	int error;

	ioat = ctx;
	ioat_log_message(1, "%s: Resetting channel\n", __func__);

	error = ioat_reset_hw(ioat);
	KASSERT(error == 0, ("%s: reset failed: %d", __func__, error));
	(void)error;
}

/*
 * User API functions
 */
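/*
 * Usage sketch (illustrative only, not part of the driver): a consumer
 * typically looks up a channel, batches operations between acquire and
 * release, and drops its reference when finished.  "my_done", "ctx" and the
 * physical addresses below are hypothetical.
 *
 *	static void
 *	my_done(void *arg, int error)	// bus_dmaengine_callback_t;
 *	{				// error is 0 or an errno derived
 *		...			// from CHANERR
 *	}
 *
 *	bus_dmaengine_t dma = ioat_get_dmaengine(0, M_WAITOK);
 *	if (dma != NULL) {
 *		ioat_acquire(dma);
 *		(void)ioat_copy(dma, dst_phys, src_phys, len,
 *		    my_done, ctx, DMA_INT_EN);
 *		ioat_release(dma);		// rings doorbell, starts DMA
 *		...
 *		ioat_put_dmaengine(dma);	// done with the channel
 *	}
 */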
unsigned
ioat_get_nchannels(void)
{

	return (ioat_channel_index);
}

bus_dmaengine_t
ioat_get_dmaengine(uint32_t index, int flags)
{
	struct ioat_softc *ioat;

	KASSERT((flags & ~(M_NOWAIT | M_WAITOK)) == 0,
	    ("invalid flags: 0x%08x", flags));
	KASSERT((flags & (M_NOWAIT | M_WAITOK)) != (M_NOWAIT | M_WAITOK),
	    ("invalid wait | nowait"));

	mtx_lock(&ioat_list_mtx);
	if (index >= ioat_channel_index ||
	    (ioat = ioat_channel[index]) == NULL) {
		mtx_unlock(&ioat_list_mtx);
		return (NULL);
	}
	mtx_lock(&ioat->submit_lock);
	mtx_unlock(&ioat_list_mtx);

	if (ioat->destroying) {
		mtx_unlock(&ioat->submit_lock);
		return (NULL);
	}

	ioat_get(ioat);
	if (ioat->quiescing) {
		if ((flags & M_NOWAIT) != 0) {
			ioat_put(ioat);
			mtx_unlock(&ioat->submit_lock);
			return (NULL);
		}

		while (ioat->quiescing && !ioat->destroying)
			msleep(&ioat->quiescing, &ioat->submit_lock, 0,
			    "getdma", 0);

		if (ioat->destroying) {
			ioat_put(ioat);
			mtx_unlock(&ioat->submit_lock);
			return (NULL);
		}
	}
	mtx_unlock(&ioat->submit_lock);
	return (&ioat->dmaengine);
}

void
ioat_put_dmaengine(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	ioat_put(ioat);
	mtx_unlock(&ioat->submit_lock);
}

int
ioat_get_hwversion(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->version);
}

size_t
ioat_get_max_io_size(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->max_xfer_size);
}

uint32_t
ioat_get_capabilities(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->capabilities);
}

int
ioat_get_domain(bus_dmaengine_t dmaengine, int *domain)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (bus_get_domain(ioat->device, domain));
}

int
ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	if (!ioat->intrdelay_supported)
		return (ENODEV);
	if (delay > ioat->intrdelay_max)
		return (ERANGE);

	ioat_write_2(ioat, IOAT_INTRDELAY_OFFSET, delay);
	ioat->cached_intrdelay =
	    ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_US_MASK;
	return (0);
}

uint16_t
ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->intrdelay_max);
}

void
ioat_acquire(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
	ioat->acq_head = ioat->head;
}

int
ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags)
{
	struct ioat_softc *ioat;
	int error;

	ioat = to_ioat_softc(dmaengine);
	ioat_acquire(dmaengine);

	error = ioat_reserve_space(ioat, n, mflags);
	if (error != 0)
		ioat_release(dmaengine);
	return (error);
}

void
ioat_release(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR3(KTR_IOAT, "%s channel=%u dispatch1 head=%u", __func__,
	    ioat->chan_idx, ioat->head);
	KFAIL_POINT_CODE(DEBUG_FP, ioat_release, /* do nothing */);
	CTR3(KTR_IOAT, "%s channel=%u dispatch2 head=%u", __func__,
	    ioat->chan_idx, ioat->head);

	if (ioat->acq_head != ioat->head) {
		ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET,
		    (uint16_t)ioat->head);

		if (!callout_pending(&ioat->poll_timer)) {
			callout_reset_on(&ioat->poll_timer, 1,
			    ioat_poll_timer_callback, ioat, ioat->cpu);
		}
	}
	mtx_unlock(&ioat->submit_lock);
}
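
/*
 * Common descriptor-construction helper: reserves one ring slot, fills in
 * the generic control word, and loads physical-address DMA maps for the
 * optional source and destination.  Returns NULL if space cannot be
 * reserved or a mapping fails.
 */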
static struct ioat_descriptor *
ioat_op_generic(struct ioat_softc *ioat, uint8_t op,
    uint32_t size, uint64_t src, uint64_t dst,
    bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_generic_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	bus_dma_segment_t seg;
	int mflags, nseg, error;

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	KASSERT((flags & ~_DMA_GENERIC_FLAGS) == 0,
	    ("Unrecognized flag(s): %#x", flags & ~_DMA_GENERIC_FLAGS));
	KASSERT(size <= ioat->max_xfer_size, ("%s: size too big (%u > %u)",
	    __func__, (unsigned)size, ioat->max_xfer_size));

	if ((flags & DMA_NO_WAIT) != 0)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	if (ioat_reserve_space(ioat, 1, mflags) != 0)
		return (NULL);

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = &ioat_get_descriptor(ioat, ioat->head)->generic;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control_generic.op = op;
	hw_desc->u.control_generic.completion_update = 1;

	if ((flags & DMA_INT_EN) != 0)
		hw_desc->u.control_generic.int_enable = 1;
	if ((flags & DMA_FENCE) != 0)
		hw_desc->u.control_generic.fence = 1;

	hw_desc->size = size;

	if (src != 0) {
		nseg = -1;
		error = _bus_dmamap_load_phys(ioat->data_tag, desc->src_dmamap,
		    src, size, 0, &seg, &nseg);
		if (error != 0) {
			ioat_log_message(0, "%s: _bus_dmamap_load_phys"
			    " failed %d\n", __func__, error);
			return (NULL);
		}
		hw_desc->src_addr = seg.ds_addr;
	}

	if (dst != 0) {
		nseg = -1;
		error = _bus_dmamap_load_phys(ioat->data_tag, desc->dst_dmamap,
		    dst, size, 0, &seg, &nseg);
		if (error != 0) {
			ioat_log_message(0, "%s: _bus_dmamap_load_phys"
			    " failed %d\n", __func__, error);
			return (NULL);
		}
		hw_desc->dest_addr = seg.ds_addr;
	}

	desc->bus_dmadesc.callback_fn = callback_fn;
	desc->bus_dmadesc.callback_arg = callback_arg;
	return (desc);
}

struct bus_dmadesc *
ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
	hw_desc->u.control.null = 1;
	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
    bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	CTR6(KTR_IOAT, "%s channel=%u desc=%p dest=%lx src=%lx len=%lx",
	    __func__, ioat->chan_idx, &desc->bus_dmadesc, dst, src, len);
	return (&desc->bus_dmadesc);
}
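
/*
 * Copy two page-aligned pages.  When a source (or destination) pair is
 * physically contiguous (second == first + PAGE_SIZE) it is loaded as a
 * single 2*PAGE_SIZE segment; otherwise the descriptor's page-break fields
 * carry the second page's address.
 */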
struct bus_dmadesc *
ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1,
    bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	bus_size_t src1_len, dst1_len;
	bus_dma_segment_t seg;
	int nseg, error;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	KASSERT(((src1 | src2 | dst1 | dst2) & PAGE_MASK) == 0,
	    ("%s: addresses are not page-aligned", __func__));

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, 0, 0,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;

	src1_len = (src2 != src1 + PAGE_SIZE) ? PAGE_SIZE : 2 * PAGE_SIZE;
	nseg = -1;
	error = _bus_dmamap_load_phys(ioat->data_tag,
	    desc->src_dmamap, src1, src1_len, 0, &seg, &nseg);
	if (error != 0) {
		ioat_log_message(0, "%s: _bus_dmamap_load_phys"
		    " failed %d\n", __func__, error);
		return (NULL);
	}
	hw_desc->src_addr = seg.ds_addr;
	if (src1_len != 2 * PAGE_SIZE) {
		hw_desc->u.control.src_page_break = 1;
		nseg = -1;
		error = _bus_dmamap_load_phys(ioat->data_tag,
		    desc->src2_dmamap, src2, PAGE_SIZE, 0, &seg, &nseg);
		if (error != 0) {
			ioat_log_message(0, "%s: _bus_dmamap_load_phys"
			    " failed %d\n", __func__, error);
			return (NULL);
		}
		hw_desc->next_src_addr = seg.ds_addr;
	}

	dst1_len = (dst2 != dst1 + PAGE_SIZE) ? PAGE_SIZE : 2 * PAGE_SIZE;
	nseg = -1;
	error = _bus_dmamap_load_phys(ioat->data_tag,
	    desc->dst_dmamap, dst1, dst1_len, 0, &seg, &nseg);
	if (error != 0) {
		ioat_log_message(0, "%s: _bus_dmamap_load_phys"
		    " failed %d\n", __func__, error);
		return (NULL);
	}
	hw_desc->dest_addr = seg.ds_addr;
	if (dst1_len != 2 * PAGE_SIZE) {
		hw_desc->u.control.dest_page_break = 1;
		nseg = -1;
		error = _bus_dmamap_load_phys(ioat->data_tag,
		    desc->dst2_dmamap, dst2, PAGE_SIZE, 0, &seg, &nseg);
		if (error != 0) {
			ioat_log_message(0, "%s: _bus_dmamap_load_phys"
			    " failed %d\n", __func__, error);
			return (NULL);
		}
		hw_desc->next_dest_addr = seg.ds_addr;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src,
    bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;
	bus_dma_segment_t seg;
	int nseg, error;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	KASSERT((ioat->capabilities & IOAT_DMACAP_MOVECRC) != 0,
	    ("%s: device lacks MOVECRC capability", __func__));
	teststore = (flags & _DMA_CRC_TESTSTORE);
	KASSERT(teststore != _DMA_CRC_TESTSTORE,
	    ("%s: TEST and STORE invalid", __func__));
	KASSERT(teststore != 0 || (flags & DMA_CRC_INLINE) == 0,
	    ("%s: INLINE invalid without TEST or STORE", __func__));

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_MOVECRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_MOVECRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_MOVECRC;
		break;
	}

	desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32;

	if ((flags & DMA_CRC_INLINE) == 0) {
		nseg = -1;
		error = _bus_dmamap_load_phys(ioat->data_tag,
		    desc->dst2_dmamap, crcptr, sizeof(uint32_t), 0,
		    &seg, &nseg);
		if (error != 0) {
			ioat_log_message(0, "%s: _bus_dmamap_load_phys"
			    " failed %d\n", __func__, error);
			return (NULL);
		}
		hw_desc->crc_address = seg.ds_addr;
	} else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len,
    uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;
	bus_dma_segment_t seg;
	int nseg, error;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	KASSERT((ioat->capabilities & IOAT_DMACAP_CRC) != 0,
	    ("%s: device lacks CRC capability", __func__));
	teststore = (flags & _DMA_CRC_TESTSTORE);
	KASSERT(teststore != _DMA_CRC_TESTSTORE,
	    ("%s: TEST and STORE invalid", __func__));
	KASSERT(teststore != 0 || (flags & DMA_CRC_INLINE) == 0,
	    ("%s: INLINE invalid without TEST or STORE", __func__));

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_CRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_CRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_CRC;
		break;
	}

	desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32;

	if ((flags & DMA_CRC_INLINE) == 0) {
		nseg = -1;
		error = _bus_dmamap_load_phys(ioat->data_tag,
		    desc->dst2_dmamap, crcptr, sizeof(uint32_t), 0,
		    &seg, &nseg);
		if (error != 0) {
			ioat_log_message(0, "%s: _bus_dmamap_load_phys"
			    " failed %d\n", __func__, error);
			return (NULL);
		}
		hw_desc->crc_address = seg.ds_addr;
	} else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern,
    bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_fill_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	KASSERT((ioat->capabilities & IOAT_DMACAP_BFILL) != 0,
	    ("%s: device lacks BFILL capability", __func__));

	desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, 0, dst,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->fill;
	hw_desc->src_data = fillpattern;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

/*
 * Ring Management
 */
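/*
 * head and tail are free-running counters; because the ring size is a power
 * of two, masking with (ring size - 1) converts them to ring indices.  With
 * the default order 13 (8192 entries), head=8195 and tail=8190 yield
 * (8195 - 8190) & 8191 == 5 active descriptors.  ioat_get_ring_space()
 * keeps one slot unused, which keeps a full ring distinguishable from an
 * empty one.
 */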
static inline uint32_t
ioat_get_active(struct ioat_softc *ioat)
{

	return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1));
}

static inline uint32_t
ioat_get_ring_space(struct ioat_softc *ioat)
{

	return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1);
}

/*
 * Reserves space in this IOAT descriptor ring by ensuring enough slots remain
 * for 'num_descs'.
 *
 * If mflags contains M_WAITOK, blocks until enough space is available.
 *
 * Returns zero on success, or an errno on error.  If num_descs is beyond the
 * maximum ring size, returns EINVAL; if allocation would block and mflags
 * contains M_NOWAIT, returns EAGAIN.
 *
 * Must be called with the submit_lock held; returns with the lock held.  The
 * lock may be dropped to allocate the ring.
 *
 * (The submit_lock is needed to add any entries to the ring, so callers are
 * assured enough room is available.)
 */
static int
ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags)
{
	boolean_t dug;
	int error;

	mtx_assert(&ioat->submit_lock, MA_OWNED);
	error = 0;
	dug = FALSE;

	if (num_descs < 1 || num_descs >= (1 << ioat->ring_size_order)) {
		error = EINVAL;
		goto out;
	}

	for (;;) {
		if (ioat->quiescing) {
			error = ENXIO;
			goto out;
		}

		if (ioat_get_ring_space(ioat) >= num_descs)
			goto out;

		CTR3(KTR_IOAT, "%s channel=%u starved (%u)", __func__,
		    ioat->chan_idx, num_descs);

		if (!dug && !ioat->is_submitter_processing) {
			ioat->is_submitter_processing = TRUE;
			mtx_unlock(&ioat->submit_lock);

			CTR2(KTR_IOAT, "%s channel=%u attempting to process events",
			    __func__, ioat->chan_idx);
			ioat_process_events(ioat, FALSE);

			mtx_lock(&ioat->submit_lock);
			dug = TRUE;
			KASSERT(ioat->is_submitter_processing == TRUE,
			    ("is_submitter_processing"));
			ioat->is_submitter_processing = FALSE;
			wakeup(&ioat->tail);
			continue;
		}

		if ((mflags & M_WAITOK) == 0) {
			error = EAGAIN;
			break;
		}
		CTR2(KTR_IOAT, "%s channel=%u blocking on completions",
		    __func__, ioat->chan_idx);
		msleep(&ioat->tail, &ioat->submit_lock, 0,
		    "ioat_full", 0);
		continue;
	}

out:
	mtx_assert(&ioat->submit_lock, MA_OWNED);
	KASSERT(!ioat->quiescing || error == ENXIO,
	    ("reserved during quiesce"));
	return (error);
}

static void
ioat_free_ring(struct ioat_softc *ioat, uint32_t size,
    struct ioat_descriptor *ring)
{

	free_domain(ring, M_IOAT);
}

static struct ioat_descriptor *
ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
{

	return (&ioat->ring[index % (1 << ioat->ring_size_order)]);
}

static union ioat_hw_descriptor *
ioat_get_descriptor(struct ioat_softc *ioat, uint32_t index)
{

	return (&ioat->hw_desc_ring[index % (1 << ioat->ring_size_order)]);
}

static void
ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr)
{
	union ioat_hw_descriptor *desc;

	ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr,
	    IOAT_CHANERR_STR);
	if (chanerr == 0)
		return;

	mtx_assert(&ioat->cleanup_lock, MA_OWNED);

	desc = ioat_get_descriptor(ioat, ioat->tail + 0);
	dump_descriptor(desc);

	desc = ioat_get_descriptor(ioat, ioat->tail + 1);
	dump_descriptor(desc);
}
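
/*
 * Polling backstop, armed from ioat_release(): drives completion processing
 * (and catches halts the completion write-back can miss) and re-arms itself
 * while descriptors remain active.
 */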
static void
ioat_poll_timer_callback(void *arg)
{
	struct ioat_softc *ioat;

	ioat = arg;
	CTR1(KTR_IOAT, "%s", __func__);

	ioat_process_events(ioat, FALSE);

	mtx_lock(&ioat->submit_lock);
	if (ioat_get_active(ioat) > 0)
		callout_schedule(&ioat->poll_timer, 1);
	mtx_unlock(&ioat->submit_lock);
}

/*
 * Support Functions
 */
static void
ioat_submit_single(struct ioat_softc *ioat)
{

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	ioat->head++;
	CTR4(KTR_IOAT, "%s channel=%u head=%u tail=%u", __func__,
	    ioat->chan_idx, ioat->head, ioat->tail);

	ioat->stats.descriptors_submitted++;
}

static int
ioat_reset_hw(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	unsigned timeout;
	int error;

	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	mtx_lock(&ioat->submit_lock);
	while (ioat->resetting && !ioat->destroying)
		msleep(&ioat->resetting, &ioat->submit_lock, 0, "IRH_drain", 0);
	if (ioat->destroying) {
		mtx_unlock(&ioat->submit_lock);
		return (ENXIO);
	}
	ioat->resetting = TRUE;
	ioat->quiescing = TRUE;
	mtx_unlock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	while (ioat_get_active(ioat) > 0)
		msleep(&ioat->tail, &ioat->cleanup_lock, 0, "ioat_drain", 1);

	/*
	 * Suspend ioat_process_events while the hardware and softc are in an
	 * indeterminate state.
	 */
	ioat->resetting_cleanup = TRUE;
	mtx_unlock(&ioat->cleanup_lock);

	CTR2(KTR_IOAT, "%s channel=%u quiesced and drained", __func__,
	    ioat->chan_idx);

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
	    timeout < 20; timeout++) {
		DELAY(1000);
		status = ioat_get_chansts(ioat);
	}
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce"));

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	CTR2(KTR_IOAT, "%s channel=%u hardware suspended", __func__,
	    ioat->chan_idx);

	/*
	 * IOAT v3 workaround - write CHANERRMSK_INT with 3E07h to mask out
	 * errors that can cause stability issues for IOAT v3.
	 */
	pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
	    4);
	chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
	pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);

	/*
	 * BDXDE and BWD models reset MSI-X registers on device reset.
	 * Save/restore their contents manually.
	 */
	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets MSI-X registers; saving\n");
		pci_save_state(ioat->device);
	}

	ioat_reset(ioat);
	CTR2(KTR_IOAT, "%s channel=%u hardware reset", __func__,
	    ioat->chan_idx);

	/* Wait at most 20 ms */
	for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
		DELAY(1000);
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets registers; restored\n");
		pci_restore_state(ioat->device);
	}

	/* Reset attempts to return the hardware to "halted." */
	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		/* So this really shouldn't happen... */
		ioat_log_message(0, "Device is active after a reset?\n");
		ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
		error = 0;
		goto out;
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	if (chanerr != 0) {
		mtx_lock(&ioat->cleanup_lock);
		ioat_halted_debug(ioat, chanerr);
		mtx_unlock(&ioat->cleanup_lock);
		error = EIO;
		goto out;
	}

	/*
	 * Bring device back online after reset.  Writing CHAINADDR brings the
	 * device back to active.
	 *
	 * The internal ring counter resets to zero, so we have to start over
	 * at zero as well.
	 */
	ioat->tail = ioat->head = 0;
	*ioat->comp_update = ioat->last_seen =
	    RING_PHYS_ADDR(ioat, ioat->tail - 1);

	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
	ioat_write_chainaddr(ioat, RING_PHYS_ADDR(ioat, 0));
	error = 0;
	CTR2(KTR_IOAT, "%s channel=%u configured channel", __func__,
	    ioat->chan_idx);

out:
	/* Enqueues a null operation and ensures it completes. */
	if (error == 0) {
		error = ioat_start_channel(ioat);
		CTR2(KTR_IOAT, "%s channel=%u started channel", __func__,
		    ioat->chan_idx);
	}

	/*
	 * Resume completions now that ring state is consistent.
	 */
	mtx_lock(&ioat->cleanup_lock);
	ioat->resetting_cleanup = FALSE;
	mtx_unlock(&ioat->cleanup_lock);

	/* Unblock submission of new work */
	mtx_lock(&ioat->submit_lock);
	ioat->quiescing = FALSE;
	wakeup(&ioat->quiescing);

	ioat->resetting = FALSE;
	wakeup(&ioat->resetting);

	CTR2(KTR_IOAT, "%s channel=%u reset done", __func__, ioat->chan_idx);
	mtx_unlock(&ioat->submit_lock);

	return (error);
}

static int
sysctl_handle_chansts(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	struct sbuf sb;
	uint64_t status;
	int error;

	ioat = arg1;

	status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;

	sbuf_new_for_sysctl(&sb, NULL, 256, req);
	switch (status) {
	case IOAT_CHANSTS_ACTIVE:
		sbuf_printf(&sb, "ACTIVE");
		break;
	case IOAT_CHANSTS_IDLE:
		sbuf_printf(&sb, "IDLE");
		break;
	case IOAT_CHANSTS_SUSPENDED:
		sbuf_printf(&sb, "SUSPENDED");
		break;
	case IOAT_CHANSTS_HALTED:
		sbuf_printf(&sb, "HALTED");
		break;
	case IOAT_CHANSTS_ARMED:
		sbuf_printf(&sb, "ARMED");
		break;
	default:
		sbuf_printf(&sb, "UNKNOWN");
		break;
	}
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	if (error != 0 || req->newptr == NULL)
		return (error);
	return (EINVAL);
}
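
/*
 * Reports the average number of descriptors completed per interrupt, with
 * one fractional decimal digit (factor 10 scaling below).
 */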
static int
sysctl_handle_dpi(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	struct sbuf sb;
#define	PRECISION	"1"
	const uintmax_t factor = 10;
	uintmax_t rate;
	int error;

	ioat = arg1;
	sbuf_new_for_sysctl(&sb, NULL, 16, req);

	if (ioat->stats.interrupts == 0) {
		sbuf_printf(&sb, "NaN");
		goto out;
	}
	rate = ioat->stats.descriptors_processed * factor /
	    ioat->stats.interrupts;
	sbuf_printf(&sb, "%ju.%." PRECISION "ju", rate / factor,
	    rate % factor);
#undef PRECISION
out:
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	if (error != 0 || req->newptr == NULL)
		return (error);
	return (EINVAL);
}

static int
sysctl_handle_reset(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	int error, arg;

	ioat = arg1;

	arg = 0;
	error = SYSCTL_OUT(req, &arg, sizeof(arg));
	if (error != 0 || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &arg, sizeof(arg));
	if (error != 0)
		return (error);

	if (arg != 0)
		error = ioat_reset_hw(ioat);

	return (error);
}

static void
dump_descriptor(void *hw_desc)
{
	int i, j;

	for (i = 0; i < 2; i++) {
		for (j = 0; j < 8; j++)
			printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]);
		printf("\n");
	}
}
static int
sysctl_handle_dpi(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	struct sbuf sb;
#define	PRECISION	"1"
	const uintmax_t factor = 10;
	uintmax_t rate;
	int error;

	ioat = arg1;
	sbuf_new_for_sysctl(&sb, NULL, 16, req);

	if (ioat->stats.interrupts == 0) {
		sbuf_printf(&sb, "NaN");
		goto out;
	}
	rate = ioat->stats.descriptors_processed * factor /
	    ioat->stats.interrupts;
	sbuf_printf(&sb, "%ju.%." PRECISION "ju", rate / factor,
	    rate % factor);
#undef PRECISION
out:
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	if (error != 0 || req->newptr == NULL)
		return (error);
	return (EINVAL);
}

static int
sysctl_handle_reset(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	int error, arg;

	ioat = arg1;

	arg = 0;
	error = SYSCTL_OUT(req, &arg, sizeof(arg));
	if (error != 0 || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &arg, sizeof(arg));
	if (error != 0)
		return (error);

	if (arg != 0)
		error = ioat_reset_hw(ioat);

	return (error);
}

static void
dump_descriptor(void *hw_desc)
{
	int i, j;

	for (i = 0; i < 2; i++) {
		for (j = 0; j < 8; j++)
			printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]);
		printf("\n");
	}
}

static void
ioat_setup_sysctl(device_t device)
{
	struct sysctl_oid_list *par, *statpar, *state, *hammer;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree, *tmp;
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);
	ctx = device_get_sysctl_ctx(device);
	tree = device_get_sysctl_tree(device);
	par = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD,
	    &ioat->version, 0, "HW version (0xMM form)");
	SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD,
	    &ioat->max_xfer_size, 0, "HW maximum transfer size");
	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD,
	    &ioat->intrdelay_supported, 0, "Is INTRDELAY supported");
	SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD,
	    &ioat->intrdelay_max, 0,
	    "Maximum configurable INTRDELAY on this channel (microseconds)");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL,
	    "IOAT channel internal state");
	state = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD,
	    &ioat->ring_size_order, 0, "SW descriptor ring size order");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head,
	    0, "SW descriptor head pointer index");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail,
	    0, "SW descriptor tail pointer index");

	SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD,
	    ioat->comp_update, "HW addr of last completion");

	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_submitter_processing",
	    CTLFLAG_RD, &ioat->is_submitter_processing, 0,
	    "submitter processing");

	SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A",
	    "String of the channel status");

	SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD,
	    &ioat->cached_intrdelay, 0,
	    "Current INTRDELAY on this channel (cached, microseconds)");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer", CTLFLAG_RD, NULL,
	    "Big hammers (mostly for testing)");
	hammer = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset",
	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I",
	    "Set to non-zero to reset the hardware");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL,
	    "IOAT channel statistics");
	statpar = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts", CTLFLAG_RW,
	    &ioat->stats.interrupts,
	    "Number of interrupts processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors", CTLFLAG_RW,
	    &ioat->stats.descriptors_processed,
	    "Number of descriptors processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted", CTLFLAG_RW,
	    &ioat->stats.descriptors_submitted,
	    "Number of descriptors submitted to this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored", CTLFLAG_RW,
	    &ioat->stats.descriptors_error,
	    "Number of descriptors failed by channel errors");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts", CTLFLAG_RW,
	    &ioat->stats.channel_halts, 0,
	    "Number of times the channel has halted");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr", CTLFLAG_RW,
	    &ioat->stats.last_halt_chanerr, 0,
	    "The raw CHANERR when the channel was last halted");

	SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A",
	    "Descriptors per interrupt");
}
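
/*
 * Illustrative sysctl(8) session for the first channel; the output values
 * are made up, but the node names match the tree built above:
 *
 *	# sysctl dev.ioat.0.state.chansts
 *	dev.ioat.0.state.chansts: IDLE
 *	# sysctl dev.ioat.0.stats.desc_per_interrupt
 *	dev.ioat.0.stats.desc_per_interrupt: 12.3
 *	# sysctl dev.ioat.0.hammer.force_hw_reset=1
 */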

static void
ioat_get(struct ioat_softc *ioat)
{

	mtx_assert(&ioat->submit_lock, MA_OWNED);
	KASSERT(ioat->refcnt < UINT32_MAX, ("refcnt overflow"));

	ioat->refcnt++;
}

static void
ioat_put(struct ioat_softc *ioat)
{

	mtx_assert(&ioat->submit_lock, MA_OWNED);
	KASSERT(ioat->refcnt >= 1, ("refcnt error"));

	if (--ioat->refcnt == 0)
		wakeup(&ioat->refcnt);
}

static void
ioat_drain_locked(struct ioat_softc *ioat)
{

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	while (ioat->refcnt > 0)
		msleep(&ioat->refcnt, &ioat->submit_lock, 0, "ioat_drain", 0);
}

#ifdef DDB
#define	_db_show_lock(lo)	LOCK_CLASS(lo)->lc_ddb_show(lo)
#define	db_show_lock(lk)	_db_show_lock(&(lk)->lock_object)
DB_SHOW_COMMAND(ioat, db_show_ioat)
{
	struct ioat_softc *sc;
	unsigned idx;

	if (!have_addr)
		goto usage;
	idx = (unsigned)addr;
	if (idx >= ioat_channel_index)
		goto usage;

	sc = ioat_channel[idx];
	db_printf("ioat softc at %p\n", sc);
	if (sc == NULL)
		return;

	db_printf(" version: %d\n", sc->version);
	db_printf(" chan_idx: %u\n", sc->chan_idx);
	db_printf(" submit_lock: ");
	db_show_lock(&sc->submit_lock);

	db_printf(" capabilities: %b\n", (int)sc->capabilities,
	    IOAT_DMACAP_STR);
	db_printf(" cached_intrdelay: %u\n", sc->cached_intrdelay);
	db_printf(" *comp_update: 0x%jx\n", (uintmax_t)*sc->comp_update);

	db_printf(" poll_timer:\n");
	db_printf("  c_time: %ju\n", (uintmax_t)sc->poll_timer.c_time);
	db_printf("  c_arg: %p\n", sc->poll_timer.c_arg);
	db_printf("  c_func: %p\n", sc->poll_timer.c_func);
	db_printf("  c_lock: %p\n", sc->poll_timer.c_lock);
	db_printf("  c_flags: 0x%x\n", (unsigned)sc->poll_timer.c_flags);

	db_printf(" quiescing: %d\n", (int)sc->quiescing);
	db_printf(" destroying: %d\n", (int)sc->destroying);
	db_printf(" is_submitter_processing: %d\n",
	    (int)sc->is_submitter_processing);
	db_printf(" intrdelay_supported: %d\n", (int)sc->intrdelay_supported);
	db_printf(" resetting: %d\n", (int)sc->resetting);

	db_printf(" head: %u\n", sc->head);
	db_printf(" tail: %u\n", sc->tail);
	db_printf(" ring_size_order: %u\n", sc->ring_size_order);
	db_printf(" last_seen: 0x%lx\n", sc->last_seen);
	db_printf(" ring: %p\n", sc->ring);
	db_printf(" descriptors: %p\n", sc->hw_desc_ring);
	db_printf(" descriptors (phys): 0x%jx\n",
	    (uintmax_t)sc->hw_desc_bus_addr);

	db_printf(" ring[%u] (tail):\n", sc->tail %
	    (1 << sc->ring_size_order));
	db_printf("  id: %u\n", ioat_get_ring_entry(sc, sc->tail)->id);
	db_printf("  addr: 0x%lx\n",
	    RING_PHYS_ADDR(sc, sc->tail));
	db_printf("  next: 0x%lx\n",
	    ioat_get_descriptor(sc, sc->tail)->generic.next);

	db_printf(" ring[%u] (head - 1):\n", (sc->head - 1) %
	    (1 << sc->ring_size_order));
	db_printf("  id: %u\n", ioat_get_ring_entry(sc, sc->head - 1)->id);
	db_printf("  addr: 0x%lx\n",
	    RING_PHYS_ADDR(sc, sc->head - 1));
	db_printf("  next: 0x%lx\n",
	    ioat_get_descriptor(sc, sc->head - 1)->generic.next);

	db_printf(" ring[%u] (head):\n", (sc->head) %
	    (1 << sc->ring_size_order));
	db_printf("  id: %u\n", ioat_get_ring_entry(sc, sc->head)->id);
	db_printf("  addr: 0x%lx\n",
	    RING_PHYS_ADDR(sc, sc->head));
	db_printf("  next: 0x%lx\n",
	    ioat_get_descriptor(sc, sc->head)->generic.next);

	for (idx = 0; idx < (1 << sc->ring_size_order); idx++)
		if ((*sc->comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK)
		    == RING_PHYS_ADDR(sc, idx))
			db_printf(" ring[%u] == hardware tail\n", idx);

	db_printf(" cleanup_lock: ");
	db_show_lock(&sc->cleanup_lock);

	db_printf(" refcnt: %u\n", sc->refcnt);
	db_printf(" stats:\n");
	db_printf("  interrupts: %lu\n", sc->stats.interrupts);
	db_printf("  descriptors_processed: %lu\n",
	    sc->stats.descriptors_processed);
	db_printf("  descriptors_error: %lu\n", sc->stats.descriptors_error);
	db_printf("  descriptors_submitted: %lu\n",
	    sc->stats.descriptors_submitted);

	db_printf("  channel_halts: %u\n", sc->stats.channel_halts);
	db_printf("  last_halt_chanerr: %u\n", sc->stats.last_halt_chanerr);

	if (db_pager_quit)
		return;

	db_printf(" hw status:\n");
	db_printf("  status: 0x%lx\n", ioat_get_chansts(sc));
	db_printf("  chanctrl: 0x%x\n",
	    (unsigned)ioat_read_2(sc, IOAT_CHANCTRL_OFFSET));
	db_printf("  chancmd: 0x%x\n",
	    (unsigned)ioat_read_1(sc, IOAT_CHANCMD_OFFSET));
	db_printf("  dmacount: 0x%x\n",
	    (unsigned)ioat_read_2(sc, IOAT_DMACOUNT_OFFSET));
	db_printf("  chainaddr: 0x%lx\n",
	    ioat_read_double_4(sc, IOAT_CHAINADDR_OFFSET_LOW));
	db_printf("  chancmp: 0x%lx\n",
	    ioat_read_double_4(sc, IOAT_CHANCMP_OFFSET_LOW));
	db_printf("  chanerr: %b\n",
	    (int)ioat_read_4(sc, IOAT_CHANERR_OFFSET), IOAT_CHANERR_STR);
	return;
usage:
	/* Valid indices run 0 .. ioat_channel_index - 1. */
	db_printf("usage: show ioat <0-%u>\n", ioat_channel_index - 1);
	return;
}
#endif /* DDB */
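
/*
 * Example (illustrative) of invoking the DDB command above from the
 * debugger prompt; the softc address shown is made up:
 *
 *	db> show ioat 0
 *	ioat softc at 0xfffff800035aa000
 *	 version: 51
 *	 chan_idx: 0
 *	 ...
 */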