ioat.c revision 356430
1/*- 2 * Copyright (C) 2012 Intel Corporation 3 * All rights reserved. 4 * Copyright (C) 2018 Alexander Motin <mav@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28#include <sys/cdefs.h> 29__FBSDID("$FreeBSD: stable/11/sys/dev/ioat/ioat.c 356430 2020-01-07 00:54:45Z mav $"); 30 31#include "opt_ddb.h" 32 33#include <sys/param.h> 34#include <sys/systm.h> 35#include <sys/bus.h> 36#include <sys/conf.h> 37#include <sys/fail.h> 38#include <sys/ioccom.h> 39#include <sys/kernel.h> 40#include <sys/lock.h> 41#include <sys/malloc.h> 42#include <sys/module.h> 43#include <sys/mutex.h> 44#include <sys/rman.h> 45#include <sys/sbuf.h> 46#include <sys/sysctl.h> 47#include <sys/taskqueue.h> 48#include <sys/time.h> 49#include <dev/pci/pcireg.h> 50#include <dev/pci/pcivar.h> 51#include <machine/bus.h> 52#include <machine/resource.h> 53#include <machine/stdarg.h> 54 55#ifdef DDB 56#include <ddb/ddb.h> 57#endif 58 59#include "ioat.h" 60#include "ioat_hw.h" 61#include "ioat_internal.h" 62 63#ifndef BUS_SPACE_MAXADDR_40BIT 64#define BUS_SPACE_MAXADDR_40BIT 0xFFFFFFFFFFULL 65#endif 66 67static int ioat_probe(device_t device); 68static int ioat_attach(device_t device); 69static int ioat_detach(device_t device); 70static int ioat_setup_intr(struct ioat_softc *ioat); 71static int ioat_teardown_intr(struct ioat_softc *ioat); 72static int ioat3_attach(device_t device); 73static int ioat_start_channel(struct ioat_softc *ioat); 74static int ioat_map_pci_bar(struct ioat_softc *ioat); 75static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, 76 int error); 77static void ioat_interrupt_handler(void *arg); 78static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat); 79static int chanerr_to_errno(uint32_t); 80static void ioat_process_events(struct ioat_softc *ioat, boolean_t intr); 81static inline uint32_t ioat_get_active(struct ioat_softc *ioat); 82static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat); 83static void ioat_free_ring(struct ioat_softc *, uint32_t size, 84 struct ioat_descriptor *); 85static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags); 86static union ioat_hw_descriptor *ioat_get_descriptor(struct ioat_softc *, 87 uint32_t index); 88static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *, 89 uint32_t index); 90static void ioat_halted_debug(struct ioat_softc *, uint32_t); 91static void ioat_poll_timer_callback(void *arg); 92static void dump_descriptor(void *hw_desc); 93static void ioat_submit_single(struct ioat_softc *ioat); 94static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, 95 int error); 96static int ioat_reset_hw(struct ioat_softc *ioat); 97static void ioat_reset_hw_task(void *, int); 98static void ioat_setup_sysctl(device_t device); 99static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS); 100static void ioat_get(struct ioat_softc *); 101static void ioat_put(struct ioat_softc *); 102static void ioat_drain_locked(struct ioat_softc *); 103 104#define ioat_log_message(v, ...) do { \ 105 if ((v) <= g_ioat_debug_level) { \ 106 device_printf(ioat->device, __VA_ARGS__); \ 107 } \ 108} while (0) 109 110MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations"); 111SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node"); 112 113static int g_force_legacy_interrupts; 114SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN, 115 &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled"); 116 117int g_ioat_debug_level = 0; 118SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level, 119 0, "Set log level (0-3) for ioat(4). Higher is more verbose."); 120 121unsigned g_ioat_ring_order = 13; 122SYSCTL_UINT(_hw_ioat, OID_AUTO, ring_order, CTLFLAG_RDTUN, &g_ioat_ring_order, 123 0, "Set IOAT ring order. (1 << this) == ring size."); 124 125/* 126 * OS <-> Driver interface structures 127 */ 128static device_method_t ioat_pci_methods[] = { 129 /* Device interface */ 130 DEVMETHOD(device_probe, ioat_probe), 131 DEVMETHOD(device_attach, ioat_attach), 132 DEVMETHOD(device_detach, ioat_detach), 133 DEVMETHOD_END 134}; 135 136static driver_t ioat_pci_driver = { 137 "ioat", 138 ioat_pci_methods, 139 sizeof(struct ioat_softc), 140}; 141 142static devclass_t ioat_devclass; 143DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0); 144MODULE_VERSION(ioat, 1); 145 146/* 147 * Private data structures 148 */ 149static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS]; 150static unsigned ioat_channel_index = 0; 151SYSCTL_UINT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0, 152 "Number of IOAT channels attached"); 153static struct mtx ioat_list_mtx; 154MTX_SYSINIT(ioat_list_mtx, &ioat_list_mtx, "ioat list mtx", MTX_DEF); 155 156static struct _pcsid 157{ 158 u_int32_t type; 159 const char *desc; 160} pci_ids[] = { 161 { 0x34308086, "TBG IOAT Ch0" }, 162 { 0x34318086, "TBG IOAT Ch1" }, 163 { 0x34328086, "TBG IOAT Ch2" }, 164 { 0x34338086, "TBG IOAT Ch3" }, 165 { 0x34298086, "TBG IOAT Ch4" }, 166 { 0x342a8086, "TBG IOAT Ch5" }, 167 { 0x342b8086, "TBG IOAT Ch6" }, 168 { 0x342c8086, "TBG IOAT Ch7" }, 169 170 { 0x37108086, "JSF IOAT Ch0" }, 171 { 0x37118086, "JSF IOAT Ch1" }, 172 { 0x37128086, "JSF IOAT Ch2" }, 173 { 0x37138086, "JSF IOAT Ch3" }, 174 { 0x37148086, "JSF IOAT Ch4" }, 175 { 0x37158086, "JSF IOAT Ch5" }, 176 { 0x37168086, "JSF IOAT Ch6" }, 177 { 0x37178086, "JSF IOAT Ch7" }, 178 { 0x37188086, "JSF IOAT Ch0 (RAID)" }, 179 { 0x37198086, "JSF IOAT Ch1 (RAID)" }, 180 181 { 0x3c208086, "SNB IOAT Ch0" }, 182 { 0x3c218086, "SNB IOAT Ch1" }, 183 { 0x3c228086, "SNB IOAT Ch2" }, 184 { 0x3c238086, "SNB IOAT Ch3" }, 185 { 0x3c248086, "SNB IOAT Ch4" }, 186 { 0x3c258086, "SNB IOAT Ch5" }, 187 { 0x3c268086, "SNB IOAT Ch6" }, 188 { 0x3c278086, "SNB IOAT Ch7" }, 189 { 0x3c2e8086, "SNB IOAT Ch0 (RAID)" }, 190 { 0x3c2f8086, "SNB IOAT Ch1 (RAID)" }, 191 192 { 0x0e208086, "IVB IOAT Ch0" }, 193 { 0x0e218086, "IVB IOAT Ch1" }, 194 { 0x0e228086, "IVB IOAT Ch2" }, 195 { 0x0e238086, "IVB IOAT Ch3" }, 196 { 0x0e248086, "IVB IOAT Ch4" }, 197 { 0x0e258086, "IVB IOAT Ch5" }, 198 { 0x0e268086, "IVB IOAT Ch6" }, 199 { 0x0e278086, "IVB IOAT Ch7" }, 200 { 0x0e2e8086, "IVB IOAT Ch0 (RAID)" }, 201 { 0x0e2f8086, "IVB IOAT Ch1 (RAID)" }, 202 203 { 0x2f208086, "HSW IOAT Ch0" }, 204 { 0x2f218086, "HSW IOAT Ch1" }, 205 { 0x2f228086, "HSW IOAT Ch2" }, 206 { 0x2f238086, "HSW IOAT Ch3" }, 207 { 0x2f248086, "HSW IOAT Ch4" }, 208 { 0x2f258086, "HSW IOAT Ch5" }, 209 { 0x2f268086, "HSW IOAT Ch6" }, 210 { 0x2f278086, "HSW IOAT Ch7" }, 211 { 0x2f2e8086, "HSW IOAT Ch0 (RAID)" }, 212 { 0x2f2f8086, "HSW IOAT Ch1 (RAID)" }, 213 214 { 0x0c508086, "BWD IOAT Ch0" }, 215 { 0x0c518086, "BWD IOAT Ch1" }, 216 { 0x0c528086, "BWD IOAT Ch2" }, 217 { 0x0c538086, "BWD IOAT Ch3" }, 218 219 { 0x6f508086, "BDXDE IOAT Ch0" }, 220 { 0x6f518086, "BDXDE IOAT Ch1" }, 221 { 0x6f528086, "BDXDE IOAT Ch2" }, 222 { 0x6f538086, "BDXDE IOAT Ch3" }, 223 224 { 0x6f208086, "BDX IOAT Ch0" }, 225 { 0x6f218086, "BDX IOAT Ch1" }, 226 { 0x6f228086, "BDX IOAT Ch2" }, 227 { 0x6f238086, "BDX IOAT Ch3" }, 228 { 0x6f248086, "BDX IOAT Ch4" }, 229 { 0x6f258086, "BDX IOAT Ch5" }, 230 { 0x6f268086, "BDX IOAT Ch6" }, 231 { 0x6f278086, "BDX IOAT Ch7" }, 232 { 0x6f2e8086, "BDX IOAT Ch0 (RAID)" }, 233 { 0x6f2f8086, "BDX IOAT Ch1 (RAID)" }, 234 { 0x20218086, "SKX IOAT" }, 235 { 0x00000000, NULL } 236}; 237 238/* 239 * OS <-> Driver linkage functions 240 */ 241static int 242ioat_probe(device_t device) 243{ 244 struct _pcsid *ep; 245 u_int32_t type; 246 247 type = pci_get_devid(device); 248 for (ep = pci_ids; ep->type; ep++) { 249 if (ep->type == type) { 250 device_set_desc(device, ep->desc); 251 return (0); 252 } 253 } 254 return (ENXIO); 255} 256 257static int 258ioat_attach(device_t device) 259{ 260 struct ioat_softc *ioat; 261 int error, i; 262 263 ioat = DEVICE2SOFTC(device); 264 ioat->device = device; 265 266 error = ioat_map_pci_bar(ioat); 267 if (error != 0) 268 goto err; 269 270 ioat->version = ioat_read_cbver(ioat); 271 if (ioat->version < IOAT_VER_3_0) { 272 error = ENODEV; 273 goto err; 274 } 275 276 error = ioat3_attach(device); 277 if (error != 0) 278 goto err; 279 280 error = pci_enable_busmaster(device); 281 if (error != 0) 282 goto err; 283 284 error = ioat_setup_intr(ioat); 285 if (error != 0) 286 goto err; 287 288 error = ioat_reset_hw(ioat); 289 if (error != 0) 290 goto err; 291 292 ioat_process_events(ioat, FALSE); 293 ioat_setup_sysctl(device); 294 295 mtx_lock(&ioat_list_mtx); 296 for (i = 0; i < IOAT_MAX_CHANNELS; i++) { 297 if (ioat_channel[i] == NULL) 298 break; 299 } 300 if (i >= IOAT_MAX_CHANNELS) { 301 mtx_unlock(&ioat_list_mtx); 302 device_printf(device, "Too many I/OAT devices in system\n"); 303 error = ENXIO; 304 goto err; 305 } 306 ioat->chan_idx = i; 307 ioat_channel[i] = ioat; 308 if (i >= ioat_channel_index) 309 ioat_channel_index = i + 1; 310 mtx_unlock(&ioat_list_mtx); 311 312 ioat_test_attach(); 313 314err: 315 if (error != 0) 316 ioat_detach(device); 317 return (error); 318} 319 320static int 321ioat_detach(device_t device) 322{ 323 struct ioat_softc *ioat; 324 325 ioat = DEVICE2SOFTC(device); 326 327 mtx_lock(&ioat_list_mtx); 328 ioat_channel[ioat->chan_idx] = NULL; 329 while (ioat_channel_index > 0 && 330 ioat_channel[ioat_channel_index - 1] == NULL) 331 ioat_channel_index--; 332 mtx_unlock(&ioat_list_mtx); 333 334 ioat_test_detach(); 335 taskqueue_drain(taskqueue_thread, &ioat->reset_task); 336 337 mtx_lock(&ioat->submit_lock); 338 ioat->quiescing = TRUE; 339 ioat->destroying = TRUE; 340 wakeup(&ioat->quiescing); 341 wakeup(&ioat->resetting); 342 343 ioat_drain_locked(ioat); 344 mtx_unlock(&ioat->submit_lock); 345 mtx_lock(&ioat->cleanup_lock); 346 while (ioat_get_active(ioat) > 0) 347 msleep(&ioat->tail, &ioat->cleanup_lock, 0, "ioat_drain", 1); 348 mtx_unlock(&ioat->cleanup_lock); 349 350 ioat_teardown_intr(ioat); 351 callout_drain(&ioat->poll_timer); 352 353 pci_disable_busmaster(device); 354 355 if (ioat->pci_resource != NULL) 356 bus_release_resource(device, SYS_RES_MEMORY, 357 ioat->pci_resource_id, ioat->pci_resource); 358 359 if (ioat->ring != NULL) 360 ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring); 361 362 if (ioat->comp_update != NULL) { 363 bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map); 364 bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update, 365 ioat->comp_update_map); 366 bus_dma_tag_destroy(ioat->comp_update_tag); 367 } 368 369 if (ioat->hw_desc_ring != NULL) { 370 bus_dmamap_unload(ioat->hw_desc_tag, ioat->hw_desc_map); 371 bus_dmamem_free(ioat->hw_desc_tag, ioat->hw_desc_ring, 372 ioat->hw_desc_map); 373 bus_dma_tag_destroy(ioat->hw_desc_tag); 374 } 375 376 return (0); 377} 378 379static int 380ioat_teardown_intr(struct ioat_softc *ioat) 381{ 382 383 if (ioat->tag != NULL) 384 bus_teardown_intr(ioat->device, ioat->res, ioat->tag); 385 386 if (ioat->res != NULL) 387 bus_release_resource(ioat->device, SYS_RES_IRQ, 388 rman_get_rid(ioat->res), ioat->res); 389 390 pci_release_msi(ioat->device); 391 return (0); 392} 393 394static int 395ioat_start_channel(struct ioat_softc *ioat) 396{ 397 struct ioat_dma_hw_descriptor *hw_desc; 398 struct ioat_descriptor *desc; 399 struct bus_dmadesc *dmadesc; 400 uint64_t status; 401 uint32_t chanerr; 402 int i; 403 404 ioat_acquire(&ioat->dmaengine); 405 406 /* Submit 'NULL' operation manually to avoid quiescing flag */ 407 desc = ioat_get_ring_entry(ioat, ioat->head); 408 hw_desc = &ioat_get_descriptor(ioat, ioat->head)->dma; 409 dmadesc = &desc->bus_dmadesc; 410 411 dmadesc->callback_fn = NULL; 412 dmadesc->callback_arg = NULL; 413 414 hw_desc->u.control_raw = 0; 415 hw_desc->u.control_generic.op = IOAT_OP_COPY; 416 hw_desc->u.control_generic.completion_update = 1; 417 hw_desc->size = 8; 418 hw_desc->src_addr = 0; 419 hw_desc->dest_addr = 0; 420 hw_desc->u.control.null = 1; 421 422 ioat_submit_single(ioat); 423 ioat_release(&ioat->dmaengine); 424 425 for (i = 0; i < 100; i++) { 426 DELAY(1); 427 status = ioat_get_chansts(ioat); 428 if (is_ioat_idle(status)) 429 return (0); 430 } 431 432 chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); 433 ioat_log_message(0, "could not start channel: " 434 "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr, 435 IOAT_CHANERR_STR); 436 return (ENXIO); 437} 438 439/* 440 * Initialize Hardware 441 */ 442static int 443ioat3_attach(device_t device) 444{ 445 struct ioat_softc *ioat; 446 struct ioat_descriptor *ring; 447 struct ioat_dma_hw_descriptor *dma_hw_desc; 448 void *hw_desc; 449 size_t ringsz; 450 int i, num_descriptors; 451 int error; 452 uint8_t xfercap; 453 454 error = 0; 455 ioat = DEVICE2SOFTC(device); 456 ioat->capabilities = ioat_read_dmacapability(ioat); 457 458 ioat_log_message(0, "Capabilities: %b\n", (int)ioat->capabilities, 459 IOAT_DMACAP_STR); 460 461 xfercap = ioat_read_xfercap(ioat); 462 ioat->max_xfer_size = 1 << xfercap; 463 464 ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & 465 IOAT_INTRDELAY_SUPPORTED) != 0; 466 if (ioat->intrdelay_supported) 467 ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK; 468 469 /* TODO: need to check DCA here if we ever do XOR/PQ */ 470 471 mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF); 472 mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF); 473 callout_init(&ioat->poll_timer, 1); 474 TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat); 475 476 /* Establish lock order for Witness */ 477 mtx_lock(&ioat->cleanup_lock); 478 mtx_lock(&ioat->submit_lock); 479 mtx_unlock(&ioat->submit_lock); 480 mtx_unlock(&ioat->cleanup_lock); 481 482 ioat->is_submitter_processing = FALSE; 483 484 bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0, 485 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 486 sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL, 487 &ioat->comp_update_tag); 488 489 error = bus_dmamem_alloc(ioat->comp_update_tag, 490 (void **)&ioat->comp_update, BUS_DMA_ZERO | BUS_DMA_WAITOK, 491 &ioat->comp_update_map); 492 if (ioat->comp_update == NULL) 493 return (ENOMEM); 494 495 error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map, 496 ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat, 497 BUS_DMA_NOWAIT); 498 if (error != 0) 499 return (error); 500 501 ioat->ring_size_order = g_ioat_ring_order; 502 num_descriptors = 1 << ioat->ring_size_order; 503 ringsz = sizeof(struct ioat_dma_hw_descriptor) * num_descriptors; 504 505 error = bus_dma_tag_create(bus_get_dma_tag(ioat->device), 506 2 * 1024 * 1024, 0x0, (bus_addr_t)BUS_SPACE_MAXADDR_40BIT, 507 BUS_SPACE_MAXADDR, NULL, NULL, ringsz, 1, ringsz, 0, NULL, NULL, 508 &ioat->hw_desc_tag); 509 if (error != 0) 510 return (error); 511 512 error = bus_dmamem_alloc(ioat->hw_desc_tag, &hw_desc, 513 BUS_DMA_ZERO | BUS_DMA_WAITOK, &ioat->hw_desc_map); 514 if (error != 0) 515 return (error); 516 517 error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc, 518 ringsz, ioat_dmamap_cb, &ioat->hw_desc_bus_addr, BUS_DMA_NOWAIT); 519 if (error) 520 return (error); 521 522 ioat->hw_desc_ring = hw_desc; 523 524 ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT, 525 M_ZERO | M_WAITOK); 526 527 ring = ioat->ring; 528 for (i = 0; i < num_descriptors; i++) { 529 memset(&ring[i].bus_dmadesc, 0, sizeof(ring[i].bus_dmadesc)); 530 ring[i].id = i; 531 } 532 533 for (i = 0; i < num_descriptors; i++) { 534 dma_hw_desc = &ioat->hw_desc_ring[i].dma; 535 dma_hw_desc->next = RING_PHYS_ADDR(ioat, i + 1); 536 } 537 538 ioat->tail = ioat->head = 0; 539 *ioat->comp_update = ioat->last_seen = 540 RING_PHYS_ADDR(ioat, ioat->tail - 1); 541 return (0); 542} 543 544static int 545ioat_map_pci_bar(struct ioat_softc *ioat) 546{ 547 548 ioat->pci_resource_id = PCIR_BAR(0); 549 ioat->pci_resource = bus_alloc_resource_any(ioat->device, 550 SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE); 551 552 if (ioat->pci_resource == NULL) { 553 ioat_log_message(0, "unable to allocate pci resource\n"); 554 return (ENODEV); 555 } 556 557 ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource); 558 ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource); 559 return (0); 560} 561 562static void 563ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) 564{ 565 struct ioat_softc *ioat = arg; 566 567 KASSERT(error == 0, ("%s: error:%d", __func__, error)); 568 ioat->comp_update_bus_addr = seg[0].ds_addr; 569} 570 571static void 572ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 573{ 574 bus_addr_t *baddr; 575 576 KASSERT(error == 0, ("%s: error:%d", __func__, error)); 577 baddr = arg; 578 *baddr = segs->ds_addr; 579} 580 581/* 582 * Interrupt setup and handlers 583 */ 584static int 585ioat_setup_intr(struct ioat_softc *ioat) 586{ 587 uint32_t num_vectors; 588 int error; 589 boolean_t use_msix; 590 boolean_t force_legacy_interrupts; 591 592 use_msix = FALSE; 593 force_legacy_interrupts = FALSE; 594 595 if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) { 596 num_vectors = 1; 597 pci_alloc_msix(ioat->device, &num_vectors); 598 if (num_vectors == 1) 599 use_msix = TRUE; 600 } 601 602 if (use_msix) { 603 ioat->rid = 1; 604 ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ, 605 &ioat->rid, RF_ACTIVE); 606 } else { 607 ioat->rid = 0; 608 ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ, 609 &ioat->rid, RF_SHAREABLE | RF_ACTIVE); 610 } 611 if (ioat->res == NULL) { 612 ioat_log_message(0, "bus_alloc_resource failed\n"); 613 return (ENOMEM); 614 } 615 616 ioat->tag = NULL; 617 error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE | 618 INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag); 619 if (error != 0) { 620 ioat_log_message(0, "bus_setup_intr failed\n"); 621 return (error); 622 } 623 624 ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN); 625 return (0); 626} 627 628static boolean_t 629ioat_model_resets_msix(struct ioat_softc *ioat) 630{ 631 u_int32_t pciid; 632 633 pciid = pci_get_devid(ioat->device); 634 switch (pciid) { 635 /* BWD: */ 636 case 0x0c508086: 637 case 0x0c518086: 638 case 0x0c528086: 639 case 0x0c538086: 640 /* BDXDE: */ 641 case 0x6f508086: 642 case 0x6f518086: 643 case 0x6f528086: 644 case 0x6f538086: 645 return (TRUE); 646 } 647 648 return (FALSE); 649} 650 651static void 652ioat_interrupt_handler(void *arg) 653{ 654 struct ioat_softc *ioat = arg; 655 656 ioat->stats.interrupts++; 657 ioat_process_events(ioat, TRUE); 658} 659 660static int 661chanerr_to_errno(uint32_t chanerr) 662{ 663 664 if (chanerr == 0) 665 return (0); 666 if ((chanerr & (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0) 667 return (EFAULT); 668 if ((chanerr & (IOAT_CHANERR_RDERR | IOAT_CHANERR_WDERR)) != 0) 669 return (EIO); 670 /* This one is probably our fault: */ 671 if ((chanerr & IOAT_CHANERR_NDADDERR) != 0) 672 return (EIO); 673 return (EIO); 674} 675 676static void 677ioat_process_events(struct ioat_softc *ioat, boolean_t intr) 678{ 679 struct ioat_descriptor *desc; 680 struct bus_dmadesc *dmadesc; 681 uint64_t comp_update, status; 682 uint32_t completed, chanerr; 683 int error; 684 685 if (intr) { 686 mtx_lock(&ioat->cleanup_lock); 687 } else { 688 if (!mtx_trylock(&ioat->cleanup_lock)) 689 return; 690 } 691 692 /* 693 * Don't run while the hardware is being reset. Reset is responsible 694 * for blocking new work and draining & completing existing work, so 695 * there is nothing to do until new work is queued after reset anyway. 696 */ 697 if (ioat->resetting_cleanup) { 698 mtx_unlock(&ioat->cleanup_lock); 699 return; 700 } 701 702 completed = 0; 703 comp_update = *ioat->comp_update; 704 status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK; 705 706 if (status < ioat->hw_desc_bus_addr || 707 status >= ioat->hw_desc_bus_addr + (1 << ioat->ring_size_order) * 708 sizeof(struct ioat_generic_hw_descriptor)) 709 panic("Bogus completion address %jx (channel %u)", 710 (uintmax_t)status, ioat->chan_idx); 711 712 if (status == ioat->last_seen) { 713 /* 714 * If we landed in process_events and nothing has been 715 * completed, check for a timeout due to channel halt. 716 */ 717 goto out; 718 } 719 CTR4(KTR_IOAT, "%s channel=%u hw_status=0x%lx last_seen=0x%lx", 720 __func__, ioat->chan_idx, comp_update, ioat->last_seen); 721 722 while (RING_PHYS_ADDR(ioat, ioat->tail - 1) != status) { 723 desc = ioat_get_ring_entry(ioat, ioat->tail); 724 dmadesc = &desc->bus_dmadesc; 725 CTR5(KTR_IOAT, "channel=%u completing desc idx %u (%p) ok cb %p(%p)", 726 ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn, 727 dmadesc->callback_arg); 728 729 if (dmadesc->callback_fn != NULL) 730 dmadesc->callback_fn(dmadesc->callback_arg, 0); 731 732 completed++; 733 ioat->tail++; 734 } 735 CTR5(KTR_IOAT, "%s channel=%u head=%u tail=%u active=%u", __func__, 736 ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat)); 737 738 if (completed != 0) { 739 ioat->last_seen = RING_PHYS_ADDR(ioat, ioat->tail - 1); 740 ioat->stats.descriptors_processed += completed; 741 wakeup(&ioat->tail); 742 } 743 744out: 745 ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN); 746 mtx_unlock(&ioat->cleanup_lock); 747 748 /* 749 * The device doesn't seem to reliably push suspend/halt statuses to 750 * the channel completion memory address, so poll the device register 751 * here. For performance reasons skip it on interrupts, do it only 752 * on much more rare polling events. 753 */ 754 if (!intr) 755 comp_update = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS; 756 if (!is_ioat_halted(comp_update) && !is_ioat_suspended(comp_update)) 757 return; 758 759 ioat->stats.channel_halts++; 760 761 /* 762 * Fatal programming error on this DMA channel. Flush any outstanding 763 * work with error status and restart the engine. 764 */ 765 mtx_lock(&ioat->submit_lock); 766 ioat->quiescing = TRUE; 767 mtx_unlock(&ioat->submit_lock); 768 769 /* 770 * This is safe to do here because the submit queue is quiesced. We 771 * know that we will drain all outstanding events, so ioat_reset_hw 772 * can't deadlock. It is necessary to protect other ioat_process_event 773 * threads from racing ioat_reset_hw, reading an indeterminate hw 774 * state, and attempting to continue issuing completions. 775 */ 776 mtx_lock(&ioat->cleanup_lock); 777 ioat->resetting_cleanup = TRUE; 778 779 chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); 780 if (1 <= g_ioat_debug_level) 781 ioat_halted_debug(ioat, chanerr); 782 ioat->stats.last_halt_chanerr = chanerr; 783 784 while (ioat_get_active(ioat) > 0) { 785 desc = ioat_get_ring_entry(ioat, ioat->tail); 786 dmadesc = &desc->bus_dmadesc; 787 CTR5(KTR_IOAT, "channel=%u completing desc idx %u (%p) err cb %p(%p)", 788 ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn, 789 dmadesc->callback_arg); 790 791 if (dmadesc->callback_fn != NULL) 792 dmadesc->callback_fn(dmadesc->callback_arg, 793 chanerr_to_errno(chanerr)); 794 795 ioat->tail++; 796 ioat->stats.descriptors_processed++; 797 ioat->stats.descriptors_error++; 798 } 799 CTR5(KTR_IOAT, "%s channel=%u head=%u tail=%u active=%u", __func__, 800 ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat)); 801 802 /* Clear error status */ 803 ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr); 804 805 mtx_unlock(&ioat->cleanup_lock); 806 807 ioat_log_message(0, "Resetting channel to recover from error\n"); 808 error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task); 809 KASSERT(error == 0, 810 ("%s: taskqueue_enqueue failed: %d", __func__, error)); 811} 812 813static void 814ioat_reset_hw_task(void *ctx, int pending __unused) 815{ 816 struct ioat_softc *ioat; 817 int error; 818 819 ioat = ctx; 820 ioat_log_message(1, "%s: Resetting channel\n", __func__); 821 822 error = ioat_reset_hw(ioat); 823 KASSERT(error == 0, ("%s: reset failed: %d", __func__, error)); 824 (void)error; 825} 826 827/* 828 * User API functions 829 */ 830unsigned 831ioat_get_nchannels(void) 832{ 833 834 return (ioat_channel_index); 835} 836 837bus_dmaengine_t 838ioat_get_dmaengine(uint32_t index, int flags) 839{ 840 struct ioat_softc *ioat; 841 842 KASSERT((flags & ~(M_NOWAIT | M_WAITOK)) == 0, 843 ("invalid flags: 0x%08x", flags)); 844 KASSERT((flags & (M_NOWAIT | M_WAITOK)) != (M_NOWAIT | M_WAITOK), 845 ("invalid wait | nowait")); 846 847 mtx_lock(&ioat_list_mtx); 848 if (index >= ioat_channel_index || 849 (ioat = ioat_channel[index]) == NULL) { 850 mtx_unlock(&ioat_list_mtx); 851 return (NULL); 852 } 853 mtx_lock(&ioat->submit_lock); 854 mtx_unlock(&ioat_list_mtx); 855 856 if (ioat->destroying) { 857 mtx_unlock(&ioat->submit_lock); 858 return (NULL); 859 } 860 861 ioat_get(ioat); 862 if (ioat->quiescing) { 863 if ((flags & M_NOWAIT) != 0) { 864 ioat_put(ioat); 865 mtx_unlock(&ioat->submit_lock); 866 return (NULL); 867 } 868 869 while (ioat->quiescing && !ioat->destroying) 870 msleep(&ioat->quiescing, &ioat->submit_lock, 0, "getdma", 0); 871 872 if (ioat->destroying) { 873 ioat_put(ioat); 874 mtx_unlock(&ioat->submit_lock); 875 return (NULL); 876 } 877 } 878 mtx_unlock(&ioat->submit_lock); 879 return (&ioat->dmaengine); 880} 881 882void 883ioat_put_dmaengine(bus_dmaengine_t dmaengine) 884{ 885 struct ioat_softc *ioat; 886 887 ioat = to_ioat_softc(dmaengine); 888 mtx_lock(&ioat->submit_lock); 889 ioat_put(ioat); 890 mtx_unlock(&ioat->submit_lock); 891} 892 893int 894ioat_get_hwversion(bus_dmaengine_t dmaengine) 895{ 896 struct ioat_softc *ioat; 897 898 ioat = to_ioat_softc(dmaengine); 899 return (ioat->version); 900} 901 902size_t 903ioat_get_max_io_size(bus_dmaengine_t dmaengine) 904{ 905 struct ioat_softc *ioat; 906 907 ioat = to_ioat_softc(dmaengine); 908 return (ioat->max_xfer_size); 909} 910 911uint32_t 912ioat_get_capabilities(bus_dmaengine_t dmaengine) 913{ 914 struct ioat_softc *ioat; 915 916 ioat = to_ioat_softc(dmaengine); 917 return (ioat->capabilities); 918} 919 920int 921ioat_get_domain(bus_dmaengine_t dmaengine, int *domain) 922{ 923 struct ioat_softc *ioat; 924 925 ioat = to_ioat_softc(dmaengine); 926 return (bus_get_domain(ioat->device, domain)); 927} 928 929int 930ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay) 931{ 932 struct ioat_softc *ioat; 933 934 ioat = to_ioat_softc(dmaengine); 935 if (!ioat->intrdelay_supported) 936 return (ENODEV); 937 if (delay > ioat->intrdelay_max) 938 return (ERANGE); 939 940 ioat_write_2(ioat, IOAT_INTRDELAY_OFFSET, delay); 941 ioat->cached_intrdelay = 942 ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_US_MASK; 943 return (0); 944} 945 946uint16_t 947ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine) 948{ 949 struct ioat_softc *ioat; 950 951 ioat = to_ioat_softc(dmaengine); 952 return (ioat->intrdelay_max); 953} 954 955void 956ioat_acquire(bus_dmaengine_t dmaengine) 957{ 958 struct ioat_softc *ioat; 959 960 ioat = to_ioat_softc(dmaengine); 961 mtx_lock(&ioat->submit_lock); 962 CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); 963 ioat->acq_head = ioat->head; 964} 965 966int 967ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags) 968{ 969 struct ioat_softc *ioat; 970 int error; 971 972 ioat = to_ioat_softc(dmaengine); 973 ioat_acquire(dmaengine); 974 975 error = ioat_reserve_space(ioat, n, mflags); 976 if (error != 0) 977 ioat_release(dmaengine); 978 return (error); 979} 980 981void 982ioat_release(bus_dmaengine_t dmaengine) 983{ 984 struct ioat_softc *ioat; 985 986 ioat = to_ioat_softc(dmaengine); 987 CTR3(KTR_IOAT, "%s channel=%u dispatch1 head=%u", __func__, 988 ioat->chan_idx, ioat->head); 989 KFAIL_POINT_CODE(DEBUG_FP, ioat_release, /* do nothing */); 990 CTR3(KTR_IOAT, "%s channel=%u dispatch2 head=%u", __func__, 991 ioat->chan_idx, ioat->head); 992 993 if (ioat->acq_head != ioat->head) { 994 ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET, 995 (uint16_t)ioat->head); 996 997 if (!callout_pending(&ioat->poll_timer)) { 998 callout_reset(&ioat->poll_timer, 1, 999 ioat_poll_timer_callback, ioat); 1000 } 1001 } 1002 mtx_unlock(&ioat->submit_lock); 1003} 1004 1005static struct ioat_descriptor * 1006ioat_op_generic(struct ioat_softc *ioat, uint8_t op, 1007 uint32_t size, uint64_t src, uint64_t dst, 1008 bus_dmaengine_callback_t callback_fn, void *callback_arg, 1009 uint32_t flags) 1010{ 1011 struct ioat_generic_hw_descriptor *hw_desc; 1012 struct ioat_descriptor *desc; 1013 int mflags; 1014 1015 mtx_assert(&ioat->submit_lock, MA_OWNED); 1016 1017 KASSERT((flags & ~_DMA_GENERIC_FLAGS) == 0, 1018 ("Unrecognized flag(s): %#x", flags & ~_DMA_GENERIC_FLAGS)); 1019 KASSERT(size <= ioat->max_xfer_size, ("%s: size too big (%u > %u)", 1020 __func__, (unsigned)size, ioat->max_xfer_size)); 1021 1022 if ((flags & DMA_NO_WAIT) != 0) 1023 mflags = M_NOWAIT; 1024 else 1025 mflags = M_WAITOK; 1026 1027 if (ioat_reserve_space(ioat, 1, mflags) != 0) 1028 return (NULL); 1029 1030 desc = ioat_get_ring_entry(ioat, ioat->head); 1031 hw_desc = &ioat_get_descriptor(ioat, ioat->head)->generic; 1032 1033 hw_desc->u.control_raw = 0; 1034 hw_desc->u.control_generic.op = op; 1035 hw_desc->u.control_generic.completion_update = 1; 1036 1037 if ((flags & DMA_INT_EN) != 0) 1038 hw_desc->u.control_generic.int_enable = 1; 1039 if ((flags & DMA_FENCE) != 0) 1040 hw_desc->u.control_generic.fence = 1; 1041 1042 hw_desc->size = size; 1043 hw_desc->src_addr = src; 1044 hw_desc->dest_addr = dst; 1045 1046 desc->bus_dmadesc.callback_fn = callback_fn; 1047 desc->bus_dmadesc.callback_arg = callback_arg; 1048 return (desc); 1049} 1050 1051struct bus_dmadesc * 1052ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn, 1053 void *callback_arg, uint32_t flags) 1054{ 1055 struct ioat_dma_hw_descriptor *hw_desc; 1056 struct ioat_descriptor *desc; 1057 struct ioat_softc *ioat; 1058 1059 ioat = to_ioat_softc(dmaengine); 1060 CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); 1061 1062 desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn, 1063 callback_arg, flags); 1064 if (desc == NULL) 1065 return (NULL); 1066 1067 hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma; 1068 hw_desc->u.control.null = 1; 1069 ioat_submit_single(ioat); 1070 return (&desc->bus_dmadesc); 1071} 1072 1073struct bus_dmadesc * 1074ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst, 1075 bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn, 1076 void *callback_arg, uint32_t flags) 1077{ 1078 struct ioat_dma_hw_descriptor *hw_desc; 1079 struct ioat_descriptor *desc; 1080 struct ioat_softc *ioat; 1081 1082 ioat = to_ioat_softc(dmaengine); 1083 1084 KASSERT(((src | dst) & (0xffffull << 48)) == 0, 1085 ("%s: high 16 bits of src/dst are not zero", __func__)); 1086 1087 desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn, 1088 callback_arg, flags); 1089 if (desc == NULL) 1090 return (NULL); 1091 1092 hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma; 1093 if (g_ioat_debug_level >= 3) 1094 dump_descriptor(hw_desc); 1095 1096 ioat_submit_single(ioat); 1097 CTR6(KTR_IOAT, "%s channel=%u desc=%p dest=%lx src=%lx len=%lx", 1098 __func__, ioat->chan_idx, &desc->bus_dmadesc, dst, src, len); 1099 return (&desc->bus_dmadesc); 1100} 1101 1102struct bus_dmadesc * 1103ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1, 1104 bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2, 1105 bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) 1106{ 1107 struct ioat_dma_hw_descriptor *hw_desc; 1108 struct ioat_descriptor *desc; 1109 struct ioat_softc *ioat; 1110 1111 ioat = to_ioat_softc(dmaengine); 1112 CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); 1113 1114 KASSERT(((src1 | src2 | dst1 | dst2) & (0xffffull << 48)) == 0, 1115 ("%s: high 16 bits of src/dst are not zero", __func__)); 1116 KASSERT(((src1 | src2 | dst1 | dst2) & PAGE_MASK) == 0, 1117 ("%s: addresses are not page-aligned", __func__)); 1118 1119 desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, src1, dst1, 1120 callback_fn, callback_arg, flags); 1121 if (desc == NULL) 1122 return (NULL); 1123 1124 hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma; 1125 if (src2 != src1 + PAGE_SIZE) { 1126 hw_desc->u.control.src_page_break = 1; 1127 hw_desc->next_src_addr = src2; 1128 } 1129 if (dst2 != dst1 + PAGE_SIZE) { 1130 hw_desc->u.control.dest_page_break = 1; 1131 hw_desc->next_dest_addr = dst2; 1132 } 1133 1134 if (g_ioat_debug_level >= 3) 1135 dump_descriptor(hw_desc); 1136 1137 ioat_submit_single(ioat); 1138 return (&desc->bus_dmadesc); 1139} 1140 1141struct bus_dmadesc * 1142ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src, 1143 bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr, 1144 bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) 1145{ 1146 struct ioat_crc32_hw_descriptor *hw_desc; 1147 struct ioat_descriptor *desc; 1148 struct ioat_softc *ioat; 1149 uint32_t teststore; 1150 uint8_t op; 1151 1152 ioat = to_ioat_softc(dmaengine); 1153 CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); 1154 1155 KASSERT((ioat->capabilities & IOAT_DMACAP_MOVECRC) != 0, 1156 ("%s: device lacks MOVECRC capability", __func__)); 1157 KASSERT(((src | dst) & (0xffffffull << 40)) == 0, 1158 ("%s: high 24 bits of src/dst are not zero", __func__)); 1159 teststore = (flags & _DMA_CRC_TESTSTORE); 1160 KASSERT(teststore != _DMA_CRC_TESTSTORE, 1161 ("%s: TEST and STORE invalid", __func__)); 1162 KASSERT(teststore != 0 || (flags & DMA_CRC_INLINE) == 0, 1163 ("%s: INLINE invalid without TEST or STORE", __func__)); 1164 1165 switch (teststore) { 1166 case DMA_CRC_STORE: 1167 op = IOAT_OP_MOVECRC_STORE; 1168 break; 1169 case DMA_CRC_TEST: 1170 op = IOAT_OP_MOVECRC_TEST; 1171 break; 1172 default: 1173 KASSERT(teststore == 0, ("bogus")); 1174 op = IOAT_OP_MOVECRC; 1175 break; 1176 } 1177 1178 KASSERT((flags & DMA_CRC_INLINE) != 0 || 1179 (crcptr & (0xffffffull << 40)) == 0, 1180 ("%s: high 24 bits of crcptr are not zero", __func__)); 1181 1182 desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn, 1183 callback_arg, flags & ~_DMA_CRC_FLAGS); 1184 if (desc == NULL) 1185 return (NULL); 1186 1187 hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32; 1188 1189 if ((flags & DMA_CRC_INLINE) == 0) 1190 hw_desc->crc_address = crcptr; 1191 else 1192 hw_desc->u.control.crc_location = 1; 1193 1194 if (initialseed != NULL) { 1195 hw_desc->u.control.use_seed = 1; 1196 hw_desc->seed = *initialseed; 1197 } 1198 1199 if (g_ioat_debug_level >= 3) 1200 dump_descriptor(hw_desc); 1201 1202 ioat_submit_single(ioat); 1203 return (&desc->bus_dmadesc); 1204} 1205 1206struct bus_dmadesc * 1207ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len, 1208 uint32_t *initialseed, bus_addr_t crcptr, 1209 bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) 1210{ 1211 struct ioat_crc32_hw_descriptor *hw_desc; 1212 struct ioat_descriptor *desc; 1213 struct ioat_softc *ioat; 1214 uint32_t teststore; 1215 uint8_t op; 1216 1217 ioat = to_ioat_softc(dmaengine); 1218 CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); 1219 1220 KASSERT((ioat->capabilities & IOAT_DMACAP_CRC) != 0, 1221 ("%s: device lacks CRC capability", __func__)); 1222 KASSERT((src & (0xffffffull << 40)) == 0, 1223 ("%s: high 24 bits of src are not zero", __func__)); 1224 teststore = (flags & _DMA_CRC_TESTSTORE); 1225 KASSERT(teststore != _DMA_CRC_TESTSTORE, 1226 ("%s: TEST and STORE invalid", __func__)); 1227 KASSERT(teststore != 0 || (flags & DMA_CRC_INLINE) == 0, 1228 ("%s: INLINE invalid without TEST or STORE", __func__)); 1229 1230 switch (teststore) { 1231 case DMA_CRC_STORE: 1232 op = IOAT_OP_CRC_STORE; 1233 break; 1234 case DMA_CRC_TEST: 1235 op = IOAT_OP_CRC_TEST; 1236 break; 1237 default: 1238 KASSERT(teststore == 0, ("bogus")); 1239 op = IOAT_OP_CRC; 1240 break; 1241 } 1242 1243 KASSERT((flags & DMA_CRC_INLINE) != 0 || 1244 (crcptr & (0xffffffull << 40)) == 0, 1245 ("%s: high 24 bits of crcptr are not zero", __func__)); 1246 1247 desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn, 1248 callback_arg, flags & ~_DMA_CRC_FLAGS); 1249 if (desc == NULL) 1250 return (NULL); 1251 1252 hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32; 1253 1254 if ((flags & DMA_CRC_INLINE) == 0) 1255 hw_desc->crc_address = crcptr; 1256 else 1257 hw_desc->u.control.crc_location = 1; 1258 1259 if (initialseed != NULL) { 1260 hw_desc->u.control.use_seed = 1; 1261 hw_desc->seed = *initialseed; 1262 } 1263 1264 if (g_ioat_debug_level >= 3) 1265 dump_descriptor(hw_desc); 1266 1267 ioat_submit_single(ioat); 1268 return (&desc->bus_dmadesc); 1269} 1270 1271struct bus_dmadesc * 1272ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern, 1273 bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg, 1274 uint32_t flags) 1275{ 1276 struct ioat_fill_hw_descriptor *hw_desc; 1277 struct ioat_descriptor *desc; 1278 struct ioat_softc *ioat; 1279 1280 ioat = to_ioat_softc(dmaengine); 1281 CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); 1282 1283 KASSERT((ioat->capabilities & IOAT_DMACAP_BFILL) != 0, 1284 ("%s: device lacks BFILL capability", __func__)); 1285 KASSERT((dst & (0xffffull << 48)) == 0, 1286 ("%s: high 16 bits of crcptr are not zero", __func__)); 1287 1288 desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, fillpattern, dst, 1289 callback_fn, callback_arg, flags); 1290 if (desc == NULL) 1291 return (NULL); 1292 1293 hw_desc = &ioat_get_descriptor(ioat, desc->id)->fill; 1294 if (g_ioat_debug_level >= 3) 1295 dump_descriptor(hw_desc); 1296 1297 ioat_submit_single(ioat); 1298 return (&desc->bus_dmadesc); 1299} 1300 1301/* 1302 * Ring Management 1303 */ 1304static inline uint32_t 1305ioat_get_active(struct ioat_softc *ioat) 1306{ 1307 1308 return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1)); 1309} 1310 1311static inline uint32_t 1312ioat_get_ring_space(struct ioat_softc *ioat) 1313{ 1314 1315 return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1); 1316} 1317 1318/* 1319 * Reserves space in this IOAT descriptor ring by ensuring enough slots remain 1320 * for 'num_descs'. 1321 * 1322 * If mflags contains M_WAITOK, blocks until enough space is available. 1323 * 1324 * Returns zero on success, or an errno on error. If num_descs is beyond the 1325 * maximum ring size, returns EINVAl; if allocation would block and mflags 1326 * contains M_NOWAIT, returns EAGAIN. 1327 * 1328 * Must be called with the submit_lock held; returns with the lock held. The 1329 * lock may be dropped to allocate the ring. 1330 * 1331 * (The submit_lock is needed to add any entries to the ring, so callers are 1332 * assured enough room is available.) 1333 */ 1334static int 1335ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags) 1336{ 1337 boolean_t dug; 1338 int error; 1339 1340 mtx_assert(&ioat->submit_lock, MA_OWNED); 1341 error = 0; 1342 dug = FALSE; 1343 1344 if (num_descs < 1 || num_descs >= (1 << ioat->ring_size_order)) { 1345 error = EINVAL; 1346 goto out; 1347 } 1348 1349 for (;;) { 1350 if (ioat->quiescing) { 1351 error = ENXIO; 1352 goto out; 1353 } 1354 1355 if (ioat_get_ring_space(ioat) >= num_descs) 1356 goto out; 1357 1358 CTR3(KTR_IOAT, "%s channel=%u starved (%u)", __func__, 1359 ioat->chan_idx, num_descs); 1360 1361 if (!dug && !ioat->is_submitter_processing) { 1362 ioat->is_submitter_processing = TRUE; 1363 mtx_unlock(&ioat->submit_lock); 1364 1365 CTR2(KTR_IOAT, "%s channel=%u attempting to process events", 1366 __func__, ioat->chan_idx); 1367 ioat_process_events(ioat, FALSE); 1368 1369 mtx_lock(&ioat->submit_lock); 1370 dug = TRUE; 1371 KASSERT(ioat->is_submitter_processing == TRUE, 1372 ("is_submitter_processing")); 1373 ioat->is_submitter_processing = FALSE; 1374 wakeup(&ioat->tail); 1375 continue; 1376 } 1377 1378 if ((mflags & M_WAITOK) == 0) { 1379 error = EAGAIN; 1380 break; 1381 } 1382 CTR2(KTR_IOAT, "%s channel=%u blocking on completions", 1383 __func__, ioat->chan_idx); 1384 msleep(&ioat->tail, &ioat->submit_lock, 0, 1385 "ioat_full", 0); 1386 continue; 1387 } 1388 1389out: 1390 mtx_assert(&ioat->submit_lock, MA_OWNED); 1391 KASSERT(!ioat->quiescing || error == ENXIO, 1392 ("reserved during quiesce")); 1393 return (error); 1394} 1395 1396static void 1397ioat_free_ring(struct ioat_softc *ioat, uint32_t size, 1398 struct ioat_descriptor *ring) 1399{ 1400 1401 free(ring, M_IOAT); 1402} 1403 1404static struct ioat_descriptor * 1405ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index) 1406{ 1407 1408 return (&ioat->ring[index % (1 << ioat->ring_size_order)]); 1409} 1410 1411static union ioat_hw_descriptor * 1412ioat_get_descriptor(struct ioat_softc *ioat, uint32_t index) 1413{ 1414 1415 return (&ioat->hw_desc_ring[index % (1 << ioat->ring_size_order)]); 1416} 1417 1418static void 1419ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr) 1420{ 1421 union ioat_hw_descriptor *desc; 1422 1423 ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr, 1424 IOAT_CHANERR_STR); 1425 if (chanerr == 0) 1426 return; 1427 1428 mtx_assert(&ioat->cleanup_lock, MA_OWNED); 1429 1430 desc = ioat_get_descriptor(ioat, ioat->tail + 0); 1431 dump_descriptor(desc); 1432 1433 desc = ioat_get_descriptor(ioat, ioat->tail + 1); 1434 dump_descriptor(desc); 1435} 1436 1437static void 1438ioat_poll_timer_callback(void *arg) 1439{ 1440 struct ioat_softc *ioat; 1441 1442 ioat = arg; 1443 CTR1(KTR_IOAT, "%s", __func__); 1444 1445 ioat_process_events(ioat, FALSE); 1446 1447 mtx_lock(&ioat->submit_lock); 1448 if (ioat_get_active(ioat) > 0) 1449 callout_schedule(&ioat->poll_timer, 1); 1450 mtx_unlock(&ioat->submit_lock); 1451} 1452 1453/* 1454 * Support Functions 1455 */ 1456static void 1457ioat_submit_single(struct ioat_softc *ioat) 1458{ 1459 1460 mtx_assert(&ioat->submit_lock, MA_OWNED); 1461 1462 ioat->head++; 1463 CTR4(KTR_IOAT, "%s channel=%u head=%u tail=%u", __func__, 1464 ioat->chan_idx, ioat->head, ioat->tail); 1465 1466 ioat->stats.descriptors_submitted++; 1467} 1468 1469static int 1470ioat_reset_hw(struct ioat_softc *ioat) 1471{ 1472 uint64_t status; 1473 uint32_t chanerr; 1474 unsigned timeout; 1475 int error; 1476 1477 CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx); 1478 1479 mtx_lock(&ioat->submit_lock); 1480 while (ioat->resetting && !ioat->destroying) 1481 msleep(&ioat->resetting, &ioat->submit_lock, 0, "IRH_drain", 0); 1482 if (ioat->destroying) { 1483 mtx_unlock(&ioat->submit_lock); 1484 return (ENXIO); 1485 } 1486 ioat->resetting = TRUE; 1487 ioat->quiescing = TRUE; 1488 mtx_unlock(&ioat->submit_lock); 1489 mtx_lock(&ioat->cleanup_lock); 1490 while (ioat_get_active(ioat) > 0) 1491 msleep(&ioat->tail, &ioat->cleanup_lock, 0, "ioat_drain", 1); 1492 1493 /* 1494 * Suspend ioat_process_events while the hardware and softc are in an 1495 * indeterminate state. 1496 */ 1497 ioat->resetting_cleanup = TRUE; 1498 mtx_unlock(&ioat->cleanup_lock); 1499 1500 CTR2(KTR_IOAT, "%s channel=%u quiesced and drained", __func__, 1501 ioat->chan_idx); 1502 1503 status = ioat_get_chansts(ioat); 1504 if (is_ioat_active(status) || is_ioat_idle(status)) 1505 ioat_suspend(ioat); 1506 1507 /* Wait at most 20 ms */ 1508 for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) && 1509 timeout < 20; timeout++) { 1510 DELAY(1000); 1511 status = ioat_get_chansts(ioat); 1512 } 1513 if (timeout == 20) { 1514 error = ETIMEDOUT; 1515 goto out; 1516 } 1517 1518 KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce")); 1519 1520 chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); 1521 ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr); 1522 1523 CTR2(KTR_IOAT, "%s channel=%u hardware suspended", __func__, 1524 ioat->chan_idx); 1525 1526 /* 1527 * IOAT v3 workaround - CHANERRMSK_INT with 3E07h to masks out errors 1528 * that can cause stability issues for IOAT v3. 1529 */ 1530 pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07, 1531 4); 1532 chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4); 1533 pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4); 1534 1535 /* 1536 * BDXDE and BWD models reset MSI-X registers on device reset. 1537 * Save/restore their contents manually. 1538 */ 1539 if (ioat_model_resets_msix(ioat)) { 1540 ioat_log_message(1, "device resets MSI-X registers; saving\n"); 1541 pci_save_state(ioat->device); 1542 } 1543 1544 ioat_reset(ioat); 1545 CTR2(KTR_IOAT, "%s channel=%u hardware reset", __func__, 1546 ioat->chan_idx); 1547 1548 /* Wait at most 20 ms */ 1549 for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++) 1550 DELAY(1000); 1551 if (timeout == 20) { 1552 error = ETIMEDOUT; 1553 goto out; 1554 } 1555 1556 if (ioat_model_resets_msix(ioat)) { 1557 ioat_log_message(1, "device resets registers; restored\n"); 1558 pci_restore_state(ioat->device); 1559 } 1560 1561 /* Reset attempts to return the hardware to "halted." */ 1562 status = ioat_get_chansts(ioat); 1563 if (is_ioat_active(status) || is_ioat_idle(status)) { 1564 /* So this really shouldn't happen... */ 1565 ioat_log_message(0, "Device is active after a reset?\n"); 1566 ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN); 1567 error = 0; 1568 goto out; 1569 } 1570 1571 chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); 1572 if (chanerr != 0) { 1573 mtx_lock(&ioat->cleanup_lock); 1574 ioat_halted_debug(ioat, chanerr); 1575 mtx_unlock(&ioat->cleanup_lock); 1576 error = EIO; 1577 goto out; 1578 } 1579 1580 /* 1581 * Bring device back online after reset. Writing CHAINADDR brings the 1582 * device back to active. 1583 * 1584 * The internal ring counter resets to zero, so we have to start over 1585 * at zero as well. 1586 */ 1587 ioat->tail = ioat->head = 0; 1588 *ioat->comp_update = ioat->last_seen = 1589 RING_PHYS_ADDR(ioat, ioat->tail - 1); 1590 1591 ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN); 1592 ioat_write_chancmp(ioat, ioat->comp_update_bus_addr); 1593 ioat_write_chainaddr(ioat, RING_PHYS_ADDR(ioat, 0)); 1594 error = 0; 1595 CTR2(KTR_IOAT, "%s channel=%u configured channel", __func__, 1596 ioat->chan_idx); 1597 1598out: 1599 /* Enqueues a null operation and ensures it completes. */ 1600 if (error == 0) { 1601 error = ioat_start_channel(ioat); 1602 CTR2(KTR_IOAT, "%s channel=%u started channel", __func__, 1603 ioat->chan_idx); 1604 } 1605 1606 /* 1607 * Resume completions now that ring state is consistent. 1608 */ 1609 mtx_lock(&ioat->cleanup_lock); 1610 ioat->resetting_cleanup = FALSE; 1611 mtx_unlock(&ioat->cleanup_lock); 1612 1613 /* Unblock submission of new work */ 1614 mtx_lock(&ioat->submit_lock); 1615 ioat->quiescing = FALSE; 1616 wakeup(&ioat->quiescing); 1617 1618 ioat->resetting = FALSE; 1619 wakeup(&ioat->resetting); 1620 1621 CTR2(KTR_IOAT, "%s channel=%u reset done", __func__, ioat->chan_idx); 1622 mtx_unlock(&ioat->submit_lock); 1623 1624 return (error); 1625} 1626 1627static int 1628sysctl_handle_chansts(SYSCTL_HANDLER_ARGS) 1629{ 1630 struct ioat_softc *ioat; 1631 struct sbuf sb; 1632 uint64_t status; 1633 int error; 1634 1635 ioat = arg1; 1636 1637 status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS; 1638 1639 sbuf_new_for_sysctl(&sb, NULL, 256, req); 1640 switch (status) { 1641 case IOAT_CHANSTS_ACTIVE: 1642 sbuf_printf(&sb, "ACTIVE"); 1643 break; 1644 case IOAT_CHANSTS_IDLE: 1645 sbuf_printf(&sb, "IDLE"); 1646 break; 1647 case IOAT_CHANSTS_SUSPENDED: 1648 sbuf_printf(&sb, "SUSPENDED"); 1649 break; 1650 case IOAT_CHANSTS_HALTED: 1651 sbuf_printf(&sb, "HALTED"); 1652 break; 1653 case IOAT_CHANSTS_ARMED: 1654 sbuf_printf(&sb, "ARMED"); 1655 break; 1656 default: 1657 sbuf_printf(&sb, "UNKNOWN"); 1658 break; 1659 } 1660 error = sbuf_finish(&sb); 1661 sbuf_delete(&sb); 1662 1663 if (error != 0 || req->newptr == NULL) 1664 return (error); 1665 return (EINVAL); 1666} 1667 1668static int 1669sysctl_handle_dpi(SYSCTL_HANDLER_ARGS) 1670{ 1671 struct ioat_softc *ioat; 1672 struct sbuf sb; 1673#define PRECISION "1" 1674 const uintmax_t factor = 10; 1675 uintmax_t rate; 1676 int error; 1677 1678 ioat = arg1; 1679 sbuf_new_for_sysctl(&sb, NULL, 16, req); 1680 1681 if (ioat->stats.interrupts == 0) { 1682 sbuf_printf(&sb, "NaN"); 1683 goto out; 1684 } 1685 rate = ioat->stats.descriptors_processed * factor / 1686 ioat->stats.interrupts; 1687 sbuf_printf(&sb, "%ju.%." PRECISION "ju", rate / factor, 1688 rate % factor); 1689#undef PRECISION 1690out: 1691 error = sbuf_finish(&sb); 1692 sbuf_delete(&sb); 1693 if (error != 0 || req->newptr == NULL) 1694 return (error); 1695 return (EINVAL); 1696} 1697 1698static int 1699sysctl_handle_reset(SYSCTL_HANDLER_ARGS) 1700{ 1701 struct ioat_softc *ioat; 1702 int error, arg; 1703 1704 ioat = arg1; 1705 1706 arg = 0; 1707 error = SYSCTL_OUT(req, &arg, sizeof(arg)); 1708 if (error != 0 || req->newptr == NULL) 1709 return (error); 1710 1711 error = SYSCTL_IN(req, &arg, sizeof(arg)); 1712 if (error != 0) 1713 return (error); 1714 1715 if (arg != 0) 1716 error = ioat_reset_hw(ioat); 1717 1718 return (error); 1719} 1720 1721static void 1722dump_descriptor(void *hw_desc) 1723{ 1724 int i, j; 1725 1726 for (i = 0; i < 2; i++) { 1727 for (j = 0; j < 8; j++) 1728 printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]); 1729 printf("\n"); 1730 } 1731} 1732 1733static void 1734ioat_setup_sysctl(device_t device) 1735{ 1736 struct sysctl_oid_list *par, *statpar, *state, *hammer; 1737 struct sysctl_ctx_list *ctx; 1738 struct sysctl_oid *tree, *tmp; 1739 struct ioat_softc *ioat; 1740 1741 ioat = DEVICE2SOFTC(device); 1742 ctx = device_get_sysctl_ctx(device); 1743 tree = device_get_sysctl_tree(device); 1744 par = SYSCTL_CHILDREN(tree); 1745 1746 SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD, 1747 &ioat->version, 0, "HW version (0xMM form)"); 1748 SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD, 1749 &ioat->max_xfer_size, 0, "HW maximum transfer size"); 1750 SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD, 1751 &ioat->intrdelay_supported, 0, "Is INTRDELAY supported"); 1752 SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD, 1753 &ioat->intrdelay_max, 0, 1754 "Maximum configurable INTRDELAY on this channel (microseconds)"); 1755 1756 tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL, 1757 "IOAT channel internal state"); 1758 state = SYSCTL_CHILDREN(tmp); 1759 1760 SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD, 1761 &ioat->ring_size_order, 0, "SW descriptor ring size order"); 1762 SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head, 1763 0, "SW descriptor head pointer index"); 1764 SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail, 1765 0, "SW descriptor tail pointer index"); 1766 1767 SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD, 1768 ioat->comp_update, "HW addr of last completion"); 1769 1770 SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_submitter_processing", 1771 CTLFLAG_RD, &ioat->is_submitter_processing, 0, 1772 "submitter processing"); 1773 1774 SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts", 1775 CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A", 1776 "String of the channel status"); 1777 1778 SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD, 1779 &ioat->cached_intrdelay, 0, 1780 "Current INTRDELAY on this channel (cached, microseconds)"); 1781 1782 tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer", CTLFLAG_RD, NULL, 1783 "Big hammers (mostly for testing)"); 1784 hammer = SYSCTL_CHILDREN(tmp); 1785 1786 SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset", 1787 CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I", 1788 "Set to non-zero to reset the hardware"); 1789 1790 tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL, 1791 "IOAT channel statistics"); 1792 statpar = SYSCTL_CHILDREN(tmp); 1793 1794 SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts", CTLFLAG_RW, 1795 &ioat->stats.interrupts, 1796 "Number of interrupts processed on this channel"); 1797 SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors", CTLFLAG_RW, 1798 &ioat->stats.descriptors_processed, 1799 "Number of descriptors processed on this channel"); 1800 SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted", CTLFLAG_RW, 1801 &ioat->stats.descriptors_submitted, 1802 "Number of descriptors submitted to this channel"); 1803 SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored", CTLFLAG_RW, 1804 &ioat->stats.descriptors_error, 1805 "Number of descriptors failed by channel errors"); 1806 SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts", CTLFLAG_RW, 1807 &ioat->stats.channel_halts, 0, 1808 "Number of times the channel has halted"); 1809 SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr", CTLFLAG_RW, 1810 &ioat->stats.last_halt_chanerr, 0, 1811 "The raw CHANERR when the channel was last halted"); 1812 1813 SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt", 1814 CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A", 1815 "Descriptors per interrupt"); 1816} 1817 1818static void 1819ioat_get(struct ioat_softc *ioat) 1820{ 1821 1822 mtx_assert(&ioat->submit_lock, MA_OWNED); 1823 KASSERT(ioat->refcnt < UINT32_MAX, ("refcnt overflow")); 1824 1825 ioat->refcnt++; 1826} 1827 1828static void 1829ioat_put(struct ioat_softc *ioat) 1830{ 1831 1832 mtx_assert(&ioat->submit_lock, MA_OWNED); 1833 KASSERT(ioat->refcnt >= 1, ("refcnt error")); 1834 1835 if (--ioat->refcnt == 0) 1836 wakeup(&ioat->refcnt); 1837} 1838 1839static void 1840ioat_drain_locked(struct ioat_softc *ioat) 1841{ 1842 1843 mtx_assert(&ioat->submit_lock, MA_OWNED); 1844 1845 while (ioat->refcnt > 0) 1846 msleep(&ioat->refcnt, &ioat->submit_lock, 0, "ioat_drain", 0); 1847} 1848 1849#ifdef DDB 1850#define _db_show_lock(lo) LOCK_CLASS(lo)->lc_ddb_show(lo) 1851#define db_show_lock(lk) _db_show_lock(&(lk)->lock_object) 1852DB_SHOW_COMMAND(ioat, db_show_ioat) 1853{ 1854 struct ioat_softc *sc; 1855 unsigned idx; 1856 1857 if (!have_addr) 1858 goto usage; 1859 idx = (unsigned)addr; 1860 if (idx >= ioat_channel_index) 1861 goto usage; 1862 1863 sc = ioat_channel[idx]; 1864 db_printf("ioat softc at %p\n", sc); 1865 if (sc == NULL) 1866 return; 1867 1868 db_printf(" version: %d\n", sc->version); 1869 db_printf(" chan_idx: %u\n", sc->chan_idx); 1870 db_printf(" submit_lock: "); 1871 db_show_lock(&sc->submit_lock); 1872 1873 db_printf(" capabilities: %b\n", (int)sc->capabilities, 1874 IOAT_DMACAP_STR); 1875 db_printf(" cached_intrdelay: %u\n", sc->cached_intrdelay); 1876 db_printf(" *comp_update: 0x%jx\n", (uintmax_t)*sc->comp_update); 1877 1878 db_printf(" poll_timer:\n"); 1879 db_printf(" c_time: %ju\n", (uintmax_t)sc->poll_timer.c_time); 1880 db_printf(" c_arg: %p\n", sc->poll_timer.c_arg); 1881 db_printf(" c_func: %p\n", sc->poll_timer.c_func); 1882 db_printf(" c_lock: %p\n", sc->poll_timer.c_lock); 1883 db_printf(" c_flags: 0x%x\n", (unsigned)sc->poll_timer.c_flags); 1884 1885 db_printf(" quiescing: %d\n", (int)sc->quiescing); 1886 db_printf(" destroying: %d\n", (int)sc->destroying); 1887 db_printf(" is_submitter_processing: %d\n", 1888 (int)sc->is_submitter_processing); 1889 db_printf(" intrdelay_supported: %d\n", (int)sc->intrdelay_supported); 1890 db_printf(" resetting: %d\n", (int)sc->resetting); 1891 1892 db_printf(" head: %u\n", sc->head); 1893 db_printf(" tail: %u\n", sc->tail); 1894 db_printf(" ring_size_order: %u\n", sc->ring_size_order); 1895 db_printf(" last_seen: 0x%lx\n", sc->last_seen); 1896 db_printf(" ring: %p\n", sc->ring); 1897 db_printf(" descriptors: %p\n", sc->hw_desc_ring); 1898 db_printf(" descriptors (phys): 0x%jx\n", 1899 (uintmax_t)sc->hw_desc_bus_addr); 1900 1901 db_printf(" ring[%u] (tail):\n", sc->tail % 1902 (1 << sc->ring_size_order)); 1903 db_printf(" id: %u\n", ioat_get_ring_entry(sc, sc->tail)->id); 1904 db_printf(" addr: 0x%lx\n", 1905 RING_PHYS_ADDR(sc, sc->tail)); 1906 db_printf(" next: 0x%lx\n", 1907 ioat_get_descriptor(sc, sc->tail)->generic.next); 1908 1909 db_printf(" ring[%u] (head - 1):\n", (sc->head - 1) % 1910 (1 << sc->ring_size_order)); 1911 db_printf(" id: %u\n", ioat_get_ring_entry(sc, sc->head - 1)->id); 1912 db_printf(" addr: 0x%lx\n", 1913 RING_PHYS_ADDR(sc, sc->head - 1)); 1914 db_printf(" next: 0x%lx\n", 1915 ioat_get_descriptor(sc, sc->head - 1)->generic.next); 1916 1917 db_printf(" ring[%u] (head):\n", (sc->head) % 1918 (1 << sc->ring_size_order)); 1919 db_printf(" id: %u\n", ioat_get_ring_entry(sc, sc->head)->id); 1920 db_printf(" addr: 0x%lx\n", 1921 RING_PHYS_ADDR(sc, sc->head)); 1922 db_printf(" next: 0x%lx\n", 1923 ioat_get_descriptor(sc, sc->head)->generic.next); 1924 1925 for (idx = 0; idx < (1 << sc->ring_size_order); idx++) 1926 if ((*sc->comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK) 1927 == RING_PHYS_ADDR(sc, idx)) 1928 db_printf(" ring[%u] == hardware tail\n", idx); 1929 1930 db_printf(" cleanup_lock: "); 1931 db_show_lock(&sc->cleanup_lock); 1932 1933 db_printf(" refcnt: %u\n", sc->refcnt); 1934 db_printf(" stats:\n"); 1935 db_printf(" interrupts: %lu\n", sc->stats.interrupts); 1936 db_printf(" descriptors_processed: %lu\n", sc->stats.descriptors_processed); 1937 db_printf(" descriptors_error: %lu\n", sc->stats.descriptors_error); 1938 db_printf(" descriptors_submitted: %lu\n", sc->stats.descriptors_submitted); 1939 1940 db_printf(" channel_halts: %u\n", sc->stats.channel_halts); 1941 db_printf(" last_halt_chanerr: %u\n", sc->stats.last_halt_chanerr); 1942 1943 if (db_pager_quit) 1944 return; 1945 1946 db_printf(" hw status:\n"); 1947 db_printf(" status: 0x%lx\n", ioat_get_chansts(sc)); 1948 db_printf(" chanctrl: 0x%x\n", 1949 (unsigned)ioat_read_2(sc, IOAT_CHANCTRL_OFFSET)); 1950 db_printf(" chancmd: 0x%x\n", 1951 (unsigned)ioat_read_1(sc, IOAT_CHANCMD_OFFSET)); 1952 db_printf(" dmacount: 0x%x\n", 1953 (unsigned)ioat_read_2(sc, IOAT_DMACOUNT_OFFSET)); 1954 db_printf(" chainaddr: 0x%lx\n", 1955 ioat_read_double_4(sc, IOAT_CHAINADDR_OFFSET_LOW)); 1956 db_printf(" chancmp: 0x%lx\n", 1957 ioat_read_double_4(sc, IOAT_CHANCMP_OFFSET_LOW)); 1958 db_printf(" chanerr: %b\n", 1959 (int)ioat_read_4(sc, IOAT_CHANERR_OFFSET), IOAT_CHANERR_STR); 1960 return; 1961usage: 1962 db_printf("usage: show ioat <0-%u>\n", ioat_channel_index); 1963 return; 1964} 1965#endif /* DDB */ 1966