ioat.c revision 290087
1/*- 2 * Copyright (C) 2012 Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: head/sys/dev/ioat/ioat.c 290087 2015-10-28 02:37:24Z cem $"); 29 30#include <sys/param.h> 31#include <sys/systm.h> 32#include <sys/bus.h> 33#include <sys/conf.h> 34#include <sys/ioccom.h> 35#include <sys/kernel.h> 36#include <sys/lock.h> 37#include <sys/malloc.h> 38#include <sys/module.h> 39#include <sys/mutex.h> 40#include <sys/rman.h> 41#include <sys/sysctl.h> 42#include <sys/time.h> 43#include <dev/pci/pcireg.h> 44#include <dev/pci/pcivar.h> 45#include <machine/bus.h> 46#include <machine/resource.h> 47#include <machine/stdarg.h> 48 49#include "ioat.h" 50#include "ioat_hw.h" 51#include "ioat_internal.h" 52 53#define IOAT_INTR_TIMO (hz / 10) 54#define IOAT_REFLK (&ioat->submit_lock) 55 56static int ioat_probe(device_t device); 57static int ioat_attach(device_t device); 58static int ioat_detach(device_t device); 59static int ioat_setup_intr(struct ioat_softc *ioat); 60static int ioat_teardown_intr(struct ioat_softc *ioat); 61static int ioat3_attach(device_t device); 62static int ioat_start_channel(struct ioat_softc *ioat); 63static int ioat_map_pci_bar(struct ioat_softc *ioat); 64static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, 65 int error); 66static void ioat_interrupt_handler(void *arg); 67static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat); 68static void ioat_process_events(struct ioat_softc *ioat); 69static inline uint32_t ioat_get_active(struct ioat_softc *ioat); 70static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat); 71static void ioat_free_ring(struct ioat_softc *, uint32_t size, 72 struct ioat_descriptor **); 73static void ioat_free_ring_entry(struct ioat_softc *ioat, 74 struct ioat_descriptor *desc); 75static struct ioat_descriptor *ioat_alloc_ring_entry(struct ioat_softc *, 76 int mflags); 77static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags); 78static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *ioat, 79 uint32_t index); 80static struct ioat_descriptor **ioat_prealloc_ring(struct ioat_softc *, 81 uint32_t size, boolean_t need_dscr, int mflags); 82static int ring_grow(struct ioat_softc *, uint32_t oldorder, 83 struct ioat_descriptor **); 84static int ring_shrink(struct ioat_softc *, uint32_t oldorder, 85 struct ioat_descriptor **); 86static void ioat_timer_callback(void *arg); 87static void dump_descriptor(void *hw_desc); 88static void ioat_submit_single(struct ioat_softc *ioat); 89static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, 90 int error); 91static int ioat_reset_hw(struct ioat_softc *ioat); 92static void ioat_setup_sysctl(device_t device); 93static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS); 94static inline struct ioat_softc *ioat_get(struct ioat_softc *, 95 enum ioat_ref_kind); 96static inline void ioat_put(struct ioat_softc *, enum ioat_ref_kind); 97static inline void ioat_putn(struct ioat_softc *, uint32_t, 98 enum ioat_ref_kind); 99static void ioat_drain(struct ioat_softc *); 100 101#define ioat_log_message(v, ...) do { \ 102 if ((v) <= g_ioat_debug_level) { \ 103 device_printf(ioat->device, __VA_ARGS__); \ 104 } \ 105} while (0) 106 107MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations"); 108SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node"); 109 110static int g_force_legacy_interrupts; 111SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN, 112 &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled"); 113 114int g_ioat_debug_level = 0; 115SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level, 116 0, "Set log level (0-3) for ioat(4). Higher is more verbose."); 117 118/* 119 * OS <-> Driver interface structures 120 */ 121static device_method_t ioat_pci_methods[] = { 122 /* Device interface */ 123 DEVMETHOD(device_probe, ioat_probe), 124 DEVMETHOD(device_attach, ioat_attach), 125 DEVMETHOD(device_detach, ioat_detach), 126 { 0, 0 } 127}; 128 129static driver_t ioat_pci_driver = { 130 "ioat", 131 ioat_pci_methods, 132 sizeof(struct ioat_softc), 133}; 134 135static devclass_t ioat_devclass; 136DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0); 137 138/* 139 * Private data structures 140 */ 141static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS]; 142static int ioat_channel_index = 0; 143SYSCTL_INT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0, 144 "Number of IOAT channels attached"); 145 146static struct _pcsid 147{ 148 u_int32_t type; 149 const char *desc; 150} pci_ids[] = { 151 { 0x34308086, "TBG IOAT Ch0" }, 152 { 0x34318086, "TBG IOAT Ch1" }, 153 { 0x34328086, "TBG IOAT Ch2" }, 154 { 0x34338086, "TBG IOAT Ch3" }, 155 { 0x34298086, "TBG IOAT Ch4" }, 156 { 0x342a8086, "TBG IOAT Ch5" }, 157 { 0x342b8086, "TBG IOAT Ch6" }, 158 { 0x342c8086, "TBG IOAT Ch7" }, 159 160 { 0x37108086, "JSF IOAT Ch0" }, 161 { 0x37118086, "JSF IOAT Ch1" }, 162 { 0x37128086, "JSF IOAT Ch2" }, 163 { 0x37138086, "JSF IOAT Ch3" }, 164 { 0x37148086, "JSF IOAT Ch4" }, 165 { 0x37158086, "JSF IOAT Ch5" }, 166 { 0x37168086, "JSF IOAT Ch6" }, 167 { 0x37178086, "JSF IOAT Ch7" }, 168 { 0x37188086, "JSF IOAT Ch0 (RAID)" }, 169 { 0x37198086, "JSF IOAT Ch1 (RAID)" }, 170 171 { 0x3c208086, "SNB IOAT Ch0" }, 172 { 0x3c218086, "SNB IOAT Ch1" }, 173 { 0x3c228086, "SNB IOAT Ch2" }, 174 { 0x3c238086, "SNB IOAT Ch3" }, 175 { 0x3c248086, "SNB IOAT Ch4" }, 176 { 0x3c258086, "SNB IOAT Ch5" }, 177 { 0x3c268086, "SNB IOAT Ch6" }, 178 { 0x3c278086, "SNB IOAT Ch7" }, 179 { 0x3c2e8086, "SNB IOAT Ch0 (RAID)" }, 180 { 0x3c2f8086, "SNB IOAT Ch1 (RAID)" }, 181 182 { 0x0e208086, "IVB IOAT Ch0" }, 183 { 0x0e218086, "IVB IOAT Ch1" }, 184 { 0x0e228086, "IVB IOAT Ch2" }, 185 { 0x0e238086, "IVB IOAT Ch3" }, 186 { 0x0e248086, "IVB IOAT Ch4" }, 187 { 0x0e258086, "IVB IOAT Ch5" }, 188 { 0x0e268086, "IVB IOAT Ch6" }, 189 { 0x0e278086, "IVB IOAT Ch7" }, 190 { 0x0e2e8086, "IVB IOAT Ch0 (RAID)" }, 191 { 0x0e2f8086, "IVB IOAT Ch1 (RAID)" }, 192 193 { 0x2f208086, "HSW IOAT Ch0" }, 194 { 0x2f218086, "HSW IOAT Ch1" }, 195 { 0x2f228086, "HSW IOAT Ch2" }, 196 { 0x2f238086, "HSW IOAT Ch3" }, 197 { 0x2f248086, "HSW IOAT Ch4" }, 198 { 0x2f258086, "HSW IOAT Ch5" }, 199 { 0x2f268086, "HSW IOAT Ch6" }, 200 { 0x2f278086, "HSW IOAT Ch7" }, 201 { 0x2f2e8086, "HSW IOAT Ch0 (RAID)" }, 202 { 0x2f2f8086, "HSW IOAT Ch1 (RAID)" }, 203 204 { 0x0c508086, "BWD IOAT Ch0" }, 205 { 0x0c518086, "BWD IOAT Ch1" }, 206 { 0x0c528086, "BWD IOAT Ch2" }, 207 { 0x0c538086, "BWD IOAT Ch3" }, 208 209 { 0x6f508086, "BDXDE IOAT Ch0" }, 210 { 0x6f518086, "BDXDE IOAT Ch1" }, 211 { 0x6f528086, "BDXDE IOAT Ch2" }, 212 { 0x6f538086, "BDXDE IOAT Ch3" }, 213 214 { 0x00000000, NULL } 215}; 216 217/* 218 * OS <-> Driver linkage functions 219 */ 220static int 221ioat_probe(device_t device) 222{ 223 struct _pcsid *ep; 224 u_int32_t type; 225 226 type = pci_get_devid(device); 227 for (ep = pci_ids; ep->type; ep++) { 228 if (ep->type == type) { 229 device_set_desc(device, ep->desc); 230 return (0); 231 } 232 } 233 return (ENXIO); 234} 235 236static int 237ioat_attach(device_t device) 238{ 239 struct ioat_softc *ioat; 240 int error; 241 242 ioat = DEVICE2SOFTC(device); 243 ioat->device = device; 244 245 error = ioat_map_pci_bar(ioat); 246 if (error != 0) 247 goto err; 248 249 ioat->version = ioat_read_cbver(ioat); 250 if (ioat->version < IOAT_VER_3_0) { 251 error = ENODEV; 252 goto err; 253 } 254 255 error = ioat3_attach(device); 256 if (error != 0) 257 goto err; 258 259 error = pci_enable_busmaster(device); 260 if (error != 0) 261 goto err; 262 263 error = ioat_setup_intr(ioat); 264 if (error != 0) 265 goto err; 266 267 error = ioat_reset_hw(ioat); 268 if (error != 0) 269 goto err; 270 271 ioat_process_events(ioat); 272 ioat_setup_sysctl(device); 273 274 ioat_channel[ioat_channel_index++] = ioat; 275 ioat_test_attach(); 276 277err: 278 if (error != 0) 279 ioat_detach(device); 280 return (error); 281} 282 283static int 284ioat_detach(device_t device) 285{ 286 struct ioat_softc *ioat; 287 288 ioat = DEVICE2SOFTC(device); 289 290 ioat_test_detach(); 291 ioat_drain(ioat); 292 293 ioat_teardown_intr(ioat); 294 callout_drain(&ioat->timer); 295 296 pci_disable_busmaster(device); 297 298 if (ioat->pci_resource != NULL) 299 bus_release_resource(device, SYS_RES_MEMORY, 300 ioat->pci_resource_id, ioat->pci_resource); 301 302 if (ioat->ring != NULL) 303 ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring); 304 305 if (ioat->comp_update != NULL) { 306 bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map); 307 bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update, 308 ioat->comp_update_map); 309 bus_dma_tag_destroy(ioat->comp_update_tag); 310 } 311 312 bus_dma_tag_destroy(ioat->hw_desc_tag); 313 314 return (0); 315} 316 317static int 318ioat_teardown_intr(struct ioat_softc *ioat) 319{ 320 321 if (ioat->tag != NULL) 322 bus_teardown_intr(ioat->device, ioat->res, ioat->tag); 323 324 if (ioat->res != NULL) 325 bus_release_resource(ioat->device, SYS_RES_IRQ, 326 rman_get_rid(ioat->res), ioat->res); 327 328 pci_release_msi(ioat->device); 329 return (0); 330} 331 332static int 333ioat_start_channel(struct ioat_softc *ioat) 334{ 335 uint64_t status; 336 uint32_t chanerr; 337 int i; 338 339 ioat_acquire(&ioat->dmaengine); 340 ioat_null(&ioat->dmaengine, NULL, NULL, 0); 341 ioat_release(&ioat->dmaengine); 342 343 for (i = 0; i < 100; i++) { 344 DELAY(1); 345 status = ioat_get_chansts(ioat); 346 if (is_ioat_idle(status)) 347 return (0); 348 } 349 350 chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); 351 ioat_log_message(0, "could not start channel: " 352 "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr, 353 IOAT_CHANERR_STR); 354 return (ENXIO); 355} 356 357/* 358 * Initialize Hardware 359 */ 360static int 361ioat3_attach(device_t device) 362{ 363 struct ioat_softc *ioat; 364 struct ioat_descriptor **ring; 365 struct ioat_descriptor *next; 366 struct ioat_dma_hw_descriptor *dma_hw_desc; 367 int i, num_descriptors; 368 int error; 369 uint8_t xfercap; 370 371 error = 0; 372 ioat = DEVICE2SOFTC(device); 373 ioat->capabilities = ioat_read_dmacapability(ioat); 374 375 ioat_log_message(1, "Capabilities: %b\n", (int)ioat->capabilities, 376 IOAT_DMACAP_STR); 377 378 xfercap = ioat_read_xfercap(ioat); 379 ioat->max_xfer_size = 1 << xfercap; 380 381 /* TODO: need to check DCA here if we ever do XOR/PQ */ 382 383 mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF); 384 mtx_init(&ioat->cleanup_lock, "ioat_process_events", NULL, MTX_DEF); 385 callout_init(&ioat->timer, 1); 386 387 ioat->is_resize_pending = FALSE; 388 ioat->is_completion_pending = FALSE; 389 ioat->is_reset_pending = FALSE; 390 ioat->is_channel_running = FALSE; 391 392 bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0, 393 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 394 sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL, 395 &ioat->comp_update_tag); 396 397 error = bus_dmamem_alloc(ioat->comp_update_tag, 398 (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map); 399 if (ioat->comp_update == NULL) 400 return (ENOMEM); 401 402 error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map, 403 ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat, 404 0); 405 if (error != 0) 406 return (error); 407 408 ioat->ring_size_order = IOAT_MIN_ORDER; 409 410 num_descriptors = 1 << ioat->ring_size_order; 411 412 bus_dma_tag_create(bus_get_dma_tag(ioat->device), 0x40, 0x0, 413 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 414 sizeof(struct ioat_dma_hw_descriptor), 1, 415 sizeof(struct ioat_dma_hw_descriptor), 0, NULL, NULL, 416 &ioat->hw_desc_tag); 417 418 ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT, 419 M_ZERO | M_WAITOK); 420 if (ioat->ring == NULL) 421 return (ENOMEM); 422 423 ring = ioat->ring; 424 for (i = 0; i < num_descriptors; i++) { 425 ring[i] = ioat_alloc_ring_entry(ioat, M_WAITOK); 426 if (ring[i] == NULL) 427 return (ENOMEM); 428 429 ring[i]->id = i; 430 } 431 432 for (i = 0; i < num_descriptors - 1; i++) { 433 next = ring[i + 1]; 434 dma_hw_desc = ring[i]->u.dma; 435 436 dma_hw_desc->next = next->hw_desc_bus_addr; 437 } 438 439 ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr; 440 441 ioat->head = ioat->hw_head = 0; 442 ioat->tail = 0; 443 ioat->last_seen = 0; 444 return (0); 445} 446 447static int 448ioat_map_pci_bar(struct ioat_softc *ioat) 449{ 450 451 ioat->pci_resource_id = PCIR_BAR(0); 452 ioat->pci_resource = bus_alloc_resource_any(ioat->device, 453 SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE); 454 455 if (ioat->pci_resource == NULL) { 456 ioat_log_message(0, "unable to allocate pci resource\n"); 457 return (ENODEV); 458 } 459 460 ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource); 461 ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource); 462 return (0); 463} 464 465static void 466ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) 467{ 468 struct ioat_softc *ioat = arg; 469 470 KASSERT(error == 0, ("%s: error:%d", __func__, error)); 471 ioat->comp_update_bus_addr = seg[0].ds_addr; 472} 473 474static void 475ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 476{ 477 bus_addr_t *baddr; 478 479 KASSERT(error == 0, ("%s: error:%d", __func__, error)); 480 baddr = arg; 481 *baddr = segs->ds_addr; 482} 483 484/* 485 * Interrupt setup and handlers 486 */ 487static int 488ioat_setup_intr(struct ioat_softc *ioat) 489{ 490 uint32_t num_vectors; 491 int error; 492 boolean_t use_msix; 493 boolean_t force_legacy_interrupts; 494 495 use_msix = FALSE; 496 force_legacy_interrupts = FALSE; 497 498 if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) { 499 num_vectors = 1; 500 pci_alloc_msix(ioat->device, &num_vectors); 501 if (num_vectors == 1) 502 use_msix = TRUE; 503 } 504 505 if (use_msix) { 506 ioat->rid = 1; 507 ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ, 508 &ioat->rid, RF_ACTIVE); 509 } else { 510 ioat->rid = 0; 511 ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ, 512 &ioat->rid, RF_SHAREABLE | RF_ACTIVE); 513 } 514 if (ioat->res == NULL) { 515 ioat_log_message(0, "bus_alloc_resource failed\n"); 516 return (ENOMEM); 517 } 518 519 ioat->tag = NULL; 520 error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE | 521 INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag); 522 if (error != 0) { 523 ioat_log_message(0, "bus_setup_intr failed\n"); 524 return (error); 525 } 526 527 ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN); 528 return (0); 529} 530 531static boolean_t 532ioat_model_resets_msix(struct ioat_softc *ioat) 533{ 534 u_int32_t pciid; 535 536 pciid = pci_get_devid(ioat->device); 537 switch (pciid) { 538 /* BWD: */ 539 case 0x0c508086: 540 case 0x0c518086: 541 case 0x0c528086: 542 case 0x0c538086: 543 /* BDXDE: */ 544 case 0x6f508086: 545 case 0x6f518086: 546 case 0x6f528086: 547 case 0x6f538086: 548 return (TRUE); 549 } 550 551 return (FALSE); 552} 553 554static void 555ioat_interrupt_handler(void *arg) 556{ 557 struct ioat_softc *ioat = arg; 558 559 ioat_process_events(ioat); 560} 561 562static void 563ioat_process_events(struct ioat_softc *ioat) 564{ 565 struct ioat_descriptor *desc; 566 struct bus_dmadesc *dmadesc; 567 uint64_t comp_update, status; 568 uint32_t completed; 569 570 mtx_lock(&ioat->cleanup_lock); 571 572 completed = 0; 573 comp_update = *ioat->comp_update; 574 status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK; 575 576 CTR0(KTR_IOAT, __func__); 577 578 if (status == ioat->last_seen) 579 goto out; 580 581 while (1) { 582 desc = ioat_get_ring_entry(ioat, ioat->tail); 583 dmadesc = &desc->bus_dmadesc; 584 CTR1(KTR_IOAT, "completing desc %d", ioat->tail); 585 586 if (dmadesc->callback_fn) 587 (*dmadesc->callback_fn)(dmadesc->callback_arg); 588 589 completed++; 590 ioat->tail++; 591 if (desc->hw_desc_bus_addr == status) 592 break; 593 } 594 595 ioat->last_seen = desc->hw_desc_bus_addr; 596 597 if (ioat->head == ioat->tail) { 598 ioat->is_completion_pending = FALSE; 599 callout_reset(&ioat->timer, IOAT_INTR_TIMO, 600 ioat_timer_callback, ioat); 601 } 602 603out: 604 ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN); 605 mtx_unlock(&ioat->cleanup_lock); 606 607 ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF); 608 wakeup(&ioat->tail); 609} 610 611/* 612 * User API functions 613 */ 614bus_dmaengine_t 615ioat_get_dmaengine(uint32_t index) 616{ 617 618 if (index >= ioat_channel_index) 619 return (NULL); 620 return (&ioat_get(ioat_channel[index], IOAT_DMAENGINE_REF)->dmaengine); 621} 622 623void 624ioat_put_dmaengine(bus_dmaengine_t dmaengine) 625{ 626 struct ioat_softc *ioat; 627 628 ioat = to_ioat_softc(dmaengine); 629 ioat_put(ioat, IOAT_DMAENGINE_REF); 630} 631 632void 633ioat_acquire(bus_dmaengine_t dmaengine) 634{ 635 struct ioat_softc *ioat; 636 637 ioat = to_ioat_softc(dmaengine); 638 mtx_lock(&ioat->submit_lock); 639 CTR0(KTR_IOAT, __func__); 640} 641 642void 643ioat_release(bus_dmaengine_t dmaengine) 644{ 645 struct ioat_softc *ioat; 646 647 ioat = to_ioat_softc(dmaengine); 648 CTR0(KTR_IOAT, __func__); 649 ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET, (uint16_t)ioat->hw_head); 650 mtx_unlock(&ioat->submit_lock); 651} 652 653static struct ioat_descriptor * 654ioat_op_generic(struct ioat_softc *ioat, uint8_t op, 655 uint32_t size, uint64_t src, uint64_t dst, 656 bus_dmaengine_callback_t callback_fn, void *callback_arg, 657 uint32_t flags) 658{ 659 struct ioat_generic_hw_descriptor *hw_desc; 660 struct ioat_descriptor *desc; 661 int mflags; 662 663 mtx_assert(&ioat->submit_lock, MA_OWNED); 664 665 KASSERT((flags & ~DMA_ALL_FLAGS) == 0, ("Unrecognized flag(s): %#x", 666 flags & ~DMA_ALL_FLAGS)); 667 if ((flags & DMA_NO_WAIT) != 0) 668 mflags = M_NOWAIT; 669 else 670 mflags = M_WAITOK; 671 672 if (size > ioat->max_xfer_size) { 673 ioat_log_message(0, "%s: max_xfer_size = %d, requested = %u\n", 674 __func__, ioat->max_xfer_size, (unsigned)size); 675 return (NULL); 676 } 677 678 if (ioat_reserve_space(ioat, 1, mflags) != 0) 679 return (NULL); 680 681 desc = ioat_get_ring_entry(ioat, ioat->head); 682 hw_desc = desc->u.generic; 683 684 hw_desc->u.control_raw = 0; 685 hw_desc->u.control_generic.op = op; 686 hw_desc->u.control_generic.completion_update = 1; 687 688 if ((flags & DMA_INT_EN) != 0) 689 hw_desc->u.control_generic.int_enable = 1; 690 691 hw_desc->size = size; 692 hw_desc->src_addr = src; 693 hw_desc->dest_addr = dst; 694 695 desc->bus_dmadesc.callback_fn = callback_fn; 696 desc->bus_dmadesc.callback_arg = callback_arg; 697 return (desc); 698} 699 700struct bus_dmadesc * 701ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn, 702 void *callback_arg, uint32_t flags) 703{ 704 struct ioat_dma_hw_descriptor *hw_desc; 705 struct ioat_descriptor *desc; 706 struct ioat_softc *ioat; 707 708 CTR0(KTR_IOAT, __func__); 709 ioat = to_ioat_softc(dmaengine); 710 711 desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn, 712 callback_arg, flags); 713 if (desc == NULL) 714 return (NULL); 715 716 hw_desc = desc->u.dma; 717 hw_desc->u.control.null = 1; 718 ioat_submit_single(ioat); 719 return (&desc->bus_dmadesc); 720} 721 722struct bus_dmadesc * 723ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst, 724 bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn, 725 void *callback_arg, uint32_t flags) 726{ 727 struct ioat_dma_hw_descriptor *hw_desc; 728 struct ioat_descriptor *desc; 729 struct ioat_softc *ioat; 730 731 CTR0(KTR_IOAT, __func__); 732 ioat = to_ioat_softc(dmaengine); 733 734 if (((src | dst) & (0xffffull << 48)) != 0) { 735 ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n", 736 __func__); 737 return (NULL); 738 } 739 740 desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn, 741 callback_arg, flags); 742 if (desc == NULL) 743 return (NULL); 744 745 hw_desc = desc->u.dma; 746 if (g_ioat_debug_level >= 3) 747 dump_descriptor(hw_desc); 748 749 ioat_submit_single(ioat); 750 return (&desc->bus_dmadesc); 751} 752 753struct bus_dmadesc * 754ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern, 755 bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg, 756 uint32_t flags) 757{ 758 struct ioat_fill_hw_descriptor *hw_desc; 759 struct ioat_descriptor *desc; 760 struct ioat_softc *ioat; 761 762 CTR0(KTR_IOAT, __func__); 763 ioat = to_ioat_softc(dmaengine); 764 765 if ((ioat->capabilities & IOAT_DMACAP_BFILL) == 0) { 766 ioat_log_message(0, "%s: Device lacks BFILL capability\n", 767 __func__); 768 return (NULL); 769 } 770 771 if ((dst & (0xffffull << 48)) != 0) { 772 ioat_log_message(0, "%s: High 16 bits of dst invalid\n", 773 __func__); 774 return (NULL); 775 } 776 777 desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, fillpattern, dst, 778 callback_fn, callback_arg, flags); 779 if (desc == NULL) 780 return (NULL); 781 782 hw_desc = desc->u.fill; 783 if (g_ioat_debug_level >= 3) 784 dump_descriptor(hw_desc); 785 786 ioat_submit_single(ioat); 787 return (&desc->bus_dmadesc); 788} 789 790/* 791 * Ring Management 792 */ 793static inline uint32_t 794ioat_get_active(struct ioat_softc *ioat) 795{ 796 797 return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1)); 798} 799 800static inline uint32_t 801ioat_get_ring_space(struct ioat_softc *ioat) 802{ 803 804 return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1); 805} 806 807static struct ioat_descriptor * 808ioat_alloc_ring_entry(struct ioat_softc *ioat, int mflags) 809{ 810 struct ioat_generic_hw_descriptor *hw_desc; 811 struct ioat_descriptor *desc; 812 int error, busdmaflag; 813 814 error = ENOMEM; 815 hw_desc = NULL; 816 817 if ((mflags & M_WAITOK) != 0) 818 busdmaflag = BUS_DMA_WAITOK; 819 else 820 busdmaflag = BUS_DMA_NOWAIT; 821 822 desc = malloc(sizeof(*desc), M_IOAT, mflags); 823 if (desc == NULL) 824 goto out; 825 826 bus_dmamem_alloc(ioat->hw_desc_tag, (void **)&hw_desc, 827 BUS_DMA_ZERO | busdmaflag, &ioat->hw_desc_map); 828 if (hw_desc == NULL) 829 goto out; 830 831 desc->u.generic = hw_desc; 832 833 error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc, 834 sizeof(*hw_desc), ioat_dmamap_cb, &desc->hw_desc_bus_addr, 835 busdmaflag); 836 if (error) 837 goto out; 838 839out: 840 if (error) { 841 ioat_free_ring_entry(ioat, desc); 842 return (NULL); 843 } 844 return (desc); 845} 846 847static void 848ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc) 849{ 850 851 if (desc == NULL) 852 return; 853 854 if (desc->u.generic) 855 bus_dmamem_free(ioat->hw_desc_tag, desc->u.generic, 856 ioat->hw_desc_map); 857 free(desc, M_IOAT); 858} 859 860/* 861 * Reserves space in this IOAT descriptor ring by ensuring enough slots remain 862 * for 'num_descs'. 863 * 864 * If mflags contains M_WAITOK, blocks until enough space is available. 865 * 866 * Returns zero on success, or an errno on error. If num_descs is beyond the 867 * maximum ring size, returns EINVAl; if allocation would block and mflags 868 * contains M_NOWAIT, returns EAGAIN. 869 * 870 * Must be called with the submit_lock held; returns with the lock held. The 871 * lock may be dropped to allocate the ring. 872 * 873 * (The submit_lock is needed to add any entries to the ring, so callers are 874 * assured enough room is available.) 875 */ 876static int 877ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags) 878{ 879 struct ioat_descriptor **new_ring; 880 uint32_t order; 881 int error; 882 883 mtx_assert(&ioat->submit_lock, MA_OWNED); 884 error = 0; 885 886 if (num_descs < 1 || num_descs > (1 << IOAT_MAX_ORDER)) { 887 error = EINVAL; 888 goto out; 889 } 890 891 for (;;) { 892 if (ioat_get_ring_space(ioat) >= num_descs) 893 goto out; 894 895 order = ioat->ring_size_order; 896 if (ioat->is_resize_pending || order == IOAT_MAX_ORDER) { 897 if ((mflags & M_WAITOK) != 0) { 898 msleep(&ioat->tail, &ioat->submit_lock, 0, 899 "ioat_rsz", 0); 900 continue; 901 } 902 903 error = EAGAIN; 904 break; 905 } 906 907 ioat->is_resize_pending = TRUE; 908 for (;;) { 909 mtx_unlock(&ioat->submit_lock); 910 911 new_ring = ioat_prealloc_ring(ioat, 1 << (order + 1), 912 TRUE, mflags); 913 914 mtx_lock(&ioat->submit_lock); 915 KASSERT(ioat->ring_size_order == order, 916 ("is_resize_pending should protect order")); 917 918 if (new_ring == NULL) { 919 KASSERT((mflags & M_WAITOK) == 0, 920 ("allocation failed")); 921 error = EAGAIN; 922 break; 923 } 924 925 error = ring_grow(ioat, order, new_ring); 926 if (error == 0) 927 break; 928 } 929 ioat->is_resize_pending = FALSE; 930 wakeup(&ioat->tail); 931 if (error) 932 break; 933 } 934 935out: 936 mtx_assert(&ioat->submit_lock, MA_OWNED); 937 return (error); 938} 939 940static struct ioat_descriptor ** 941ioat_prealloc_ring(struct ioat_softc *ioat, uint32_t size, boolean_t need_dscr, 942 int mflags) 943{ 944 struct ioat_descriptor **ring; 945 uint32_t i; 946 int error; 947 948 KASSERT(size > 0 && powerof2(size), ("bogus size")); 949 950 ring = malloc(size * sizeof(*ring), M_IOAT, M_ZERO | mflags); 951 if (ring == NULL) 952 return (NULL); 953 954 if (need_dscr) { 955 error = ENOMEM; 956 for (i = size / 2; i < size; i++) { 957 ring[i] = ioat_alloc_ring_entry(ioat, mflags); 958 if (ring[i] == NULL) 959 goto out; 960 ring[i]->id = i; 961 } 962 } 963 error = 0; 964 965out: 966 if (error != 0 && ring != NULL) { 967 ioat_free_ring(ioat, size, ring); 968 ring = NULL; 969 } 970 return (ring); 971} 972 973static void 974ioat_free_ring(struct ioat_softc *ioat, uint32_t size, 975 struct ioat_descriptor **ring) 976{ 977 uint32_t i; 978 979 for (i = 0; i < size; i++) { 980 if (ring[i] != NULL) 981 ioat_free_ring_entry(ioat, ring[i]); 982 } 983 free(ring, M_IOAT); 984} 985 986static struct ioat_descriptor * 987ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index) 988{ 989 990 return (ioat->ring[index % (1 << ioat->ring_size_order)]); 991} 992 993static int 994ring_grow(struct ioat_softc *ioat, uint32_t oldorder, 995 struct ioat_descriptor **newring) 996{ 997 struct ioat_descriptor *tmp, *next; 998 struct ioat_dma_hw_descriptor *hw; 999 uint32_t oldsize, newsize, head, tail, i, end; 1000 int error; 1001 1002 CTR0(KTR_IOAT, __func__); 1003 1004 mtx_assert(&ioat->submit_lock, MA_OWNED); 1005 1006 if (oldorder != ioat->ring_size_order || oldorder >= IOAT_MAX_ORDER) { 1007 error = EINVAL; 1008 goto out; 1009 } 1010 1011 oldsize = (1 << oldorder); 1012 newsize = (1 << (oldorder + 1)); 1013 1014 mtx_lock(&ioat->cleanup_lock); 1015 1016 head = ioat->head & (oldsize - 1); 1017 tail = ioat->tail & (oldsize - 1); 1018 1019 /* Copy old descriptors to new ring */ 1020 for (i = 0; i < oldsize; i++) 1021 newring[i] = ioat->ring[i]; 1022 1023 /* 1024 * If head has wrapped but tail hasn't, we must swap some descriptors 1025 * around so that tail can increment directly to head. 1026 */ 1027 if (head < tail) { 1028 for (i = 0; i <= head; i++) { 1029 tmp = newring[oldsize + i]; 1030 1031 newring[oldsize + i] = newring[i]; 1032 newring[oldsize + i]->id = oldsize + i; 1033 1034 newring[i] = tmp; 1035 newring[i]->id = i; 1036 } 1037 head += oldsize; 1038 } 1039 1040 KASSERT(head >= tail, ("invariants")); 1041 1042 /* Head didn't wrap; we only need to link in oldsize..newsize */ 1043 if (head < oldsize) { 1044 i = oldsize - 1; 1045 end = newsize; 1046 } else { 1047 /* Head did wrap; link newhead..newsize and 0..oldhead */ 1048 i = head; 1049 end = newsize + (head - oldsize) + 1; 1050 } 1051 1052 /* 1053 * Fix up hardware ring, being careful not to trample the active 1054 * section (tail -> head). 1055 */ 1056 for (; i < end; i++) { 1057 KASSERT((i & (newsize - 1)) < tail || 1058 (i & (newsize - 1)) >= head, ("trampling snake")); 1059 1060 next = newring[(i + 1) & (newsize - 1)]; 1061 hw = newring[i & (newsize - 1)]->u.dma; 1062 hw->next = next->hw_desc_bus_addr; 1063 } 1064 1065 free(ioat->ring, M_IOAT); 1066 ioat->ring = newring; 1067 ioat->ring_size_order = oldorder + 1; 1068 ioat->tail = tail; 1069 ioat->head = head; 1070 error = 0; 1071 1072 mtx_unlock(&ioat->cleanup_lock); 1073out: 1074 if (error) 1075 ioat_free_ring(ioat, (1 << (oldorder + 1)), newring); 1076 return (error); 1077} 1078 1079static int 1080ring_shrink(struct ioat_softc *ioat, uint32_t oldorder, 1081 struct ioat_descriptor **newring) 1082{ 1083 struct ioat_dma_hw_descriptor *hw; 1084 struct ioat_descriptor *ent, *next; 1085 uint32_t oldsize, newsize, current_idx, new_idx, i; 1086 int error; 1087 1088 CTR0(KTR_IOAT, __func__); 1089 1090 mtx_assert(&ioat->submit_lock, MA_OWNED); 1091 1092 if (oldorder != ioat->ring_size_order || oldorder <= IOAT_MIN_ORDER) { 1093 error = EINVAL; 1094 goto out_unlocked; 1095 } 1096 1097 oldsize = (1 << oldorder); 1098 newsize = (1 << (oldorder - 1)); 1099 1100 mtx_lock(&ioat->cleanup_lock); 1101 1102 /* Can't shrink below current active set! */ 1103 if (ioat_get_active(ioat) >= newsize) { 1104 error = ENOMEM; 1105 goto out; 1106 } 1107 1108 /* 1109 * Copy current descriptors to the new ring, dropping the removed 1110 * descriptors. 1111 */ 1112 for (i = 0; i < newsize; i++) { 1113 current_idx = (ioat->tail + i) & (oldsize - 1); 1114 new_idx = (ioat->tail + i) & (newsize - 1); 1115 1116 newring[new_idx] = ioat->ring[current_idx]; 1117 newring[new_idx]->id = new_idx; 1118 } 1119 1120 /* Free deleted descriptors */ 1121 for (i = newsize; i < oldsize; i++) { 1122 ent = ioat_get_ring_entry(ioat, ioat->tail + i); 1123 ioat_free_ring_entry(ioat, ent); 1124 } 1125 1126 /* Fix up hardware ring. */ 1127 hw = newring[(ioat->tail + newsize - 1) & (newsize - 1)]->u.dma; 1128 next = newring[(ioat->tail + newsize) & (newsize - 1)]; 1129 hw->next = next->hw_desc_bus_addr; 1130 1131 free(ioat->ring, M_IOAT); 1132 ioat->ring = newring; 1133 ioat->ring_size_order = oldorder - 1; 1134 error = 0; 1135 1136out: 1137 mtx_unlock(&ioat->cleanup_lock); 1138out_unlocked: 1139 if (error) 1140 ioat_free_ring(ioat, (1 << (oldorder - 1)), newring); 1141 return (error); 1142} 1143 1144static void 1145ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr) 1146{ 1147 struct ioat_descriptor *desc; 1148 1149 ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr, 1150 IOAT_CHANERR_STR); 1151 if (chanerr == 0) 1152 return; 1153 1154 mtx_lock(&ioat->submit_lock); 1155 desc = ioat_get_ring_entry(ioat, ioat->tail + 0); 1156 dump_descriptor(desc->u.raw); 1157 1158 desc = ioat_get_ring_entry(ioat, ioat->tail + 1); 1159 dump_descriptor(desc->u.raw); 1160 mtx_unlock(&ioat->submit_lock); 1161} 1162 1163static void 1164ioat_timer_callback(void *arg) 1165{ 1166 struct ioat_descriptor **newring; 1167 struct ioat_softc *ioat; 1168 uint64_t status; 1169 uint32_t chanerr, order; 1170 1171 ioat = arg; 1172 ioat_log_message(1, "%s\n", __func__); 1173 1174 if (ioat->is_completion_pending) { 1175 status = ioat_get_chansts(ioat); 1176 1177 /* 1178 * When halted due to errors, check for channel programming 1179 * errors before advancing the completion state. 1180 */ 1181 if (is_ioat_halted(status)) { 1182 chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); 1183 ioat_halted_debug(ioat, chanerr); 1184 } 1185 ioat_process_events(ioat); 1186 } else { 1187 mtx_lock(&ioat->submit_lock); 1188 order = ioat->ring_size_order; 1189 if (ioat->is_resize_pending || order == IOAT_MIN_ORDER) { 1190 mtx_unlock(&ioat->submit_lock); 1191 goto out; 1192 } 1193 ioat->is_resize_pending = TRUE; 1194 mtx_unlock(&ioat->submit_lock); 1195 1196 newring = ioat_prealloc_ring(ioat, 1 << (order - 1), FALSE, 1197 M_NOWAIT); 1198 1199 mtx_lock(&ioat->submit_lock); 1200 KASSERT(ioat->ring_size_order == order, 1201 ("resize_pending protects order")); 1202 1203 if (newring != NULL) 1204 ring_shrink(ioat, order, newring); 1205 1206 ioat->is_resize_pending = FALSE; 1207 mtx_unlock(&ioat->submit_lock); 1208 1209out: 1210 /* Slowly scale the ring down if idle. */ 1211 if (ioat->ring_size_order > IOAT_MIN_ORDER) 1212 callout_reset(&ioat->timer, 10 * hz, 1213 ioat_timer_callback, ioat); 1214 } 1215} 1216 1217/* 1218 * Support Functions 1219 */ 1220static void 1221ioat_submit_single(struct ioat_softc *ioat) 1222{ 1223 1224 ioat_get(ioat, IOAT_ACTIVE_DESCR_REF); 1225 atomic_add_rel_int(&ioat->head, 1); 1226 atomic_add_rel_int(&ioat->hw_head, 1); 1227 1228 if (!ioat->is_completion_pending) { 1229 ioat->is_completion_pending = TRUE; 1230 callout_reset(&ioat->timer, IOAT_INTR_TIMO, 1231 ioat_timer_callback, ioat); 1232 } 1233} 1234 1235static int 1236ioat_reset_hw(struct ioat_softc *ioat) 1237{ 1238 uint64_t status; 1239 uint32_t chanerr; 1240 unsigned timeout; 1241 1242 status = ioat_get_chansts(ioat); 1243 if (is_ioat_active(status) || is_ioat_idle(status)) 1244 ioat_suspend(ioat); 1245 1246 /* Wait at most 20 ms */ 1247 for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) && 1248 timeout < 20; timeout++) { 1249 DELAY(1000); 1250 status = ioat_get_chansts(ioat); 1251 } 1252 if (timeout == 20) 1253 return (ETIMEDOUT); 1254 1255 KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce")); 1256 1257 chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); 1258 ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr); 1259 1260 /* 1261 * IOAT v3 workaround - CHANERRMSK_INT with 3E07h to masks out errors 1262 * that can cause stability issues for IOAT v3. 1263 */ 1264 pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07, 1265 4); 1266 chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4); 1267 pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4); 1268 1269 /* 1270 * BDXDE and BWD models reset MSI-X registers on device reset. 1271 * Save/restore their contents manually. 1272 */ 1273 if (ioat_model_resets_msix(ioat)) { 1274 ioat_log_message(1, "device resets MSI-X registers; saving\n"); 1275 pci_save_state(ioat->device); 1276 } 1277 1278 ioat_reset(ioat); 1279 1280 /* Wait at most 20 ms */ 1281 for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++) 1282 DELAY(1000); 1283 if (timeout == 20) 1284 return (ETIMEDOUT); 1285 1286 if (ioat_model_resets_msix(ioat)) { 1287 ioat_log_message(1, "device resets registers; restored\n"); 1288 pci_restore_state(ioat->device); 1289 } 1290 1291 /* Reset attempts to return the hardware to "halted." */ 1292 status = ioat_get_chansts(ioat); 1293 if (is_ioat_active(status) || is_ioat_idle(status)) { 1294 /* So this really shouldn't happen... */ 1295 ioat_log_message(0, "Device is active after a reset?\n"); 1296 ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN); 1297 return (0); 1298 } 1299 1300 chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); 1301 ioat_halted_debug(ioat, chanerr); 1302 if (chanerr != 0) 1303 return (EIO); 1304 1305 /* 1306 * Bring device back online after reset. Writing CHAINADDR brings the 1307 * device back to active. 1308 * 1309 * The internal ring counter resets to zero, so we have to start over 1310 * at zero as well. 1311 */ 1312 ioat->tail = ioat->head = ioat->hw_head = 0; 1313 ioat->last_seen = 0; 1314 1315 ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN); 1316 ioat_write_chancmp(ioat, ioat->comp_update_bus_addr); 1317 ioat_write_chainaddr(ioat, ioat->ring[0]->hw_desc_bus_addr); 1318 return (ioat_start_channel(ioat)); 1319} 1320 1321static int 1322sysctl_handle_reset(SYSCTL_HANDLER_ARGS) 1323{ 1324 struct ioat_softc *ioat; 1325 int error, arg; 1326 1327 ioat = arg1; 1328 1329 arg = 0; 1330 error = SYSCTL_OUT(req, &arg, sizeof(arg)); 1331 if (error != 0 || req->newptr == NULL) 1332 return (error); 1333 1334 error = SYSCTL_IN(req, &arg, sizeof(arg)); 1335 if (error != 0) 1336 return (error); 1337 1338 if (arg != 0) 1339 error = ioat_reset_hw(ioat); 1340 1341 return (error); 1342} 1343 1344static void 1345dump_descriptor(void *hw_desc) 1346{ 1347 int i, j; 1348 1349 for (i = 0; i < 2; i++) { 1350 for (j = 0; j < 8; j++) 1351 printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]); 1352 printf("\n"); 1353 } 1354} 1355 1356static void 1357ioat_setup_sysctl(device_t device) 1358{ 1359 struct sysctl_oid_list *par; 1360 struct sysctl_ctx_list *ctx; 1361 struct sysctl_oid *tree; 1362 struct ioat_softc *ioat; 1363 1364 ioat = DEVICE2SOFTC(device); 1365 ctx = device_get_sysctl_ctx(device); 1366 tree = device_get_sysctl_tree(device); 1367 par = SYSCTL_CHILDREN(tree); 1368 1369 SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD, 1370 &ioat->version, 0, "HW version (0xMM form)"); 1371 SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD, 1372 &ioat->max_xfer_size, 0, "HW maximum transfer size"); 1373 1374 SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "ring_size_order", CTLFLAG_RD, 1375 &ioat->ring_size_order, 0, "SW descriptor ring size order"); 1376 SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "head", CTLFLAG_RD, &ioat->head, 0, 1377 "SW descriptor head pointer index"); 1378 SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail, 0, 1379 "SW descriptor tail pointer index"); 1380 SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "hw_head", CTLFLAG_RD, 1381 &ioat->hw_head, 0, "HW DMACOUNT"); 1382 1383 SYSCTL_ADD_UQUAD(ctx, par, OID_AUTO, "last_completion", CTLFLAG_RD, 1384 ioat->comp_update, "HW addr of last completion"); 1385 1386 SYSCTL_ADD_INT(ctx, par, OID_AUTO, "is_resize_pending", CTLFLAG_RD, 1387 &ioat->is_resize_pending, 0, "resize pending"); 1388 SYSCTL_ADD_INT(ctx, par, OID_AUTO, "is_completion_pending", CTLFLAG_RD, 1389 &ioat->is_completion_pending, 0, "completion pending"); 1390 SYSCTL_ADD_INT(ctx, par, OID_AUTO, "is_reset_pending", CTLFLAG_RD, 1391 &ioat->is_reset_pending, 0, "reset pending"); 1392 SYSCTL_ADD_INT(ctx, par, OID_AUTO, "is_channel_running", CTLFLAG_RD, 1393 &ioat->is_channel_running, 0, "channel running"); 1394 1395 SYSCTL_ADD_PROC(ctx, par, OID_AUTO, "force_hw_reset", 1396 CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I", 1397 "Set to non-zero to reset the hardware"); 1398} 1399 1400static inline struct ioat_softc * 1401ioat_get(struct ioat_softc *ioat, enum ioat_ref_kind kind) 1402{ 1403 uint32_t old; 1404 1405 KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus")); 1406 1407 old = atomic_fetchadd_32(&ioat->refcnt, 1); 1408 KASSERT(old < UINT32_MAX, ("refcnt overflow")); 1409 1410#ifdef INVARIANTS 1411 old = atomic_fetchadd_32(&ioat->refkinds[kind], 1); 1412 KASSERT(old < UINT32_MAX, ("refcnt kind overflow")); 1413#endif 1414 1415 return (ioat); 1416} 1417 1418static inline void 1419ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind) 1420{ 1421 uint32_t old; 1422 1423 KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus")); 1424 1425 if (n == 0) 1426 return; 1427 1428#ifdef INVARIANTS 1429 old = atomic_fetchadd_32(&ioat->refkinds[kind], -n); 1430 KASSERT(old >= n, ("refcnt kind underflow")); 1431#endif 1432 1433 /* Skip acquiring the lock if resulting refcnt > 0. */ 1434 for (;;) { 1435 old = ioat->refcnt; 1436 if (old <= n) 1437 break; 1438 if (atomic_cmpset_32(&ioat->refcnt, old, old - n)) 1439 return; 1440 } 1441 1442 mtx_lock(IOAT_REFLK); 1443 old = atomic_fetchadd_32(&ioat->refcnt, -n); 1444 KASSERT(old >= n, ("refcnt error")); 1445 1446 if (old == n) 1447 wakeup(IOAT_REFLK); 1448 mtx_unlock(IOAT_REFLK); 1449} 1450 1451static inline void 1452ioat_put(struct ioat_softc *ioat, enum ioat_ref_kind kind) 1453{ 1454 1455 ioat_putn(ioat, 1, kind); 1456} 1457 1458static void 1459ioat_drain(struct ioat_softc *ioat) 1460{ 1461 1462 mtx_lock(IOAT_REFLK); 1463 while (ioat->refcnt > 0) 1464 msleep(IOAT_REFLK, IOAT_REFLK, 0, "ioat_drain", 0); 1465 mtx_unlock(IOAT_REFLK); 1466} 1467