Lines Matching defs:ioat

29 __FBSDID("$FreeBSD: stable/11/sys/dev/ioat/ioat.c 356430 2020-01-07 00:54:45Z mav $");
59 #include "ioat.h"
70 static int ioat_setup_intr(struct ioat_softc *ioat);
71 static int ioat_teardown_intr(struct ioat_softc *ioat);
73 static int ioat_start_channel(struct ioat_softc *ioat);
74 static int ioat_map_pci_bar(struct ioat_softc *ioat);
78 static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
80 static void ioat_process_events(struct ioat_softc *ioat, boolean_t intr);
81 static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
82 static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
93 static void ioat_submit_single(struct ioat_softc *ioat);
96 static int ioat_reset_hw(struct ioat_softc *ioat);
106 device_printf(ioat->device, __VA_ARGS__); \
110 MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
111 SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node");
119 0, "Set log level (0-3) for ioat(4). Higher is more verbose.");
137 "ioat",
143 DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0);
144 MODULE_VERSION(ioat, 1);
154 MTX_SYSINIT(ioat_list_mtx, &ioat_list_mtx, "ioat list mtx", MTX_DEF);
260 struct ioat_softc *ioat;
263 ioat = DEVICE2SOFTC(device);
264 ioat->device = device;
266 error = ioat_map_pci_bar(ioat);
270 ioat->version = ioat_read_cbver(ioat);
271 if (ioat->version < IOAT_VER_3_0) {
284 error = ioat_setup_intr(ioat);
288 error = ioat_reset_hw(ioat);
292 ioat_process_events(ioat, FALSE);
306 ioat->chan_idx = i;
307 ioat_channel[i] = ioat;
323 struct ioat_softc *ioat;
325 ioat = DEVICE2SOFTC(device);
328 ioat_channel[ioat->chan_idx] = NULL;
335 taskqueue_drain(taskqueue_thread, &ioat->reset_task);
337 mtx_lock(&ioat->submit_lock);
338 ioat->quiescing = TRUE;
339 ioat->destroying = TRUE;
340 wakeup(&ioat->quiescing);
341 wakeup(&ioat->resetting);
343 ioat_drain_locked(ioat);
344 mtx_unlock(&ioat->submit_lock);
345 mtx_lock(&ioat->cleanup_lock);
346 while (ioat_get_active(ioat) > 0)
347 msleep(&ioat->tail, &ioat->cleanup_lock, 0, "ioat_drain", 1);
348 mtx_unlock(&ioat->cleanup_lock);
350 ioat_teardown_intr(ioat);
351 callout_drain(&ioat->poll_timer);
355 if (ioat->pci_resource != NULL)
357 ioat->pci_resource_id, ioat->pci_resource);
359 if (ioat->ring != NULL)
360 ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring);
362 if (ioat->comp_update != NULL) {
363 bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
364 bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
365 ioat->comp_update_map);
366 bus_dma_tag_destroy(ioat->comp_update_tag);
369 if (ioat->hw_desc_ring != NULL) {
370 bus_dmamap_unload(ioat->hw_desc_tag, ioat->hw_desc_map);
371 bus_dmamem_free(ioat->hw_desc_tag, ioat->hw_desc_ring,
372 ioat->hw_desc_map);
373 bus_dma_tag_destroy(ioat->hw_desc_tag);
380 ioat_teardown_intr(struct ioat_softc *ioat)
383 if (ioat->tag != NULL)
384 bus_teardown_intr(ioat->device, ioat->res, ioat->tag);
386 if (ioat->res != NULL)
387 bus_release_resource(ioat->device, SYS_RES_IRQ,
388 rman_get_rid(ioat->res), ioat->res);
390 pci_release_msi(ioat->device);
395 ioat_start_channel(struct ioat_softc *ioat)
404 ioat_acquire(&ioat->dmaengine);
407 desc = ioat_get_ring_entry(ioat, ioat->head);
408 hw_desc = &ioat_get_descriptor(ioat, ioat->head)->dma;
422 ioat_submit_single(ioat);
423 ioat_release(&ioat->dmaengine);
427 status = ioat_get_chansts(ioat);
432 chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
445 struct ioat_softc *ioat;
455 ioat = DEVICE2SOFTC(device);
456 ioat->capabilities = ioat_read_dmacapability(ioat);
458 ioat_log_message(0, "Capabilities: %b\n", (int)ioat->capabilities,
461 xfercap = ioat_read_xfercap(ioat);
462 ioat->max_xfer_size = 1 << xfercap;
464 ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) &
466 if (ioat->intrdelay_supported)
467 ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK;
471 mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
472 mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF);
473 callout_init(&ioat->poll_timer, 1);
474 TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat);
477 mtx_lock(&ioat->cleanup_lock);
478 mtx_lock(&ioat->submit_lock);
479 mtx_unlock(&ioat->submit_lock);
480 mtx_unlock(&ioat->cleanup_lock);
482 ioat->is_submitter_processing = FALSE;
484 bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0,
487 &ioat->comp_update_tag);
489 error = bus_dmamem_alloc(ioat->comp_update_tag,
490 (void **)&ioat->comp_update, BUS_DMA_ZERO | BUS_DMA_WAITOK,
491 &ioat->comp_update_map);
492 if (ioat->comp_update == NULL)
495 error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
496 ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
501 ioat->ring_size_order = g_ioat_ring_order;
502 num_descriptors = 1 << ioat->ring_size_order;
505 error = bus_dma_tag_create(bus_get_dma_tag(ioat->device),
508 &ioat->hw_desc_tag);
512 error = bus_dmamem_alloc(ioat->hw_desc_tag, &hw_desc,
513 BUS_DMA_ZERO | BUS_DMA_WAITOK, &ioat->hw_desc_map);
517 error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
518 ringsz, ioat_dmamap_cb, &ioat->hw_desc_bus_addr, BUS_DMA_NOWAIT);
522 ioat->hw_desc_ring = hw_desc;
524 ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,
527 ring = ioat->ring;
534 dma_hw_desc = &ioat->hw_desc_ring[i].dma;
535 dma_hw_desc->next = RING_PHYS_ADDR(ioat, i + 1);
538 ioat->tail = ioat->head = 0;
539 *ioat->comp_update = ioat->last_seen =
540 RING_PHYS_ADDR(ioat, ioat->tail - 1);
545 ioat_map_pci_bar(struct ioat_softc *ioat)
548 ioat->pci_resource_id = PCIR_BAR(0);
549 ioat->pci_resource = bus_alloc_resource_any(ioat->device,
550 SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE);
552 if (ioat->pci_resource == NULL) {
557 ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
558 ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);
565 struct ioat_softc *ioat = arg;
568 ioat->comp_update_bus_addr = seg[0].ds_addr;
585 ioat_setup_intr(struct ioat_softc *ioat)
595 if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
597 pci_alloc_msix(ioat->device, &num_vectors);
603 ioat->rid = 1;
604 ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
605 &ioat->rid, RF_ACTIVE);
607 ioat->rid = 0;
608 ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
609 &ioat->rid, RF_SHAREABLE | RF_ACTIVE);
611 if (ioat->res == NULL) {
616 ioat->tag = NULL;
617 error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
618 INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
624 ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);
629 ioat_model_resets_msix(struct ioat_softc *ioat)
633 pciid = pci_get_devid(ioat->device);
654 struct ioat_softc *ioat = arg;
656 ioat->stats.interrupts++;
657 ioat_process_events(ioat, TRUE);
677 ioat_process_events(struct ioat_softc *ioat, boolean_t intr)
686 mtx_lock(&ioat->cleanup_lock);
688 if (!mtx_trylock(&ioat->cleanup_lock))
697 if (ioat->resetting_cleanup) {
698 mtx_unlock(&ioat->cleanup_lock);
703 comp_update = *ioat->comp_update;
706 if (status < ioat->hw_desc_bus_addr ||
707 status >= ioat->hw_desc_bus_addr + (1 << ioat->ring_size_order) *
710 (uintmax_t)status, ioat->chan_idx);
712 if (status == ioat->last_seen) {
720 __func__, ioat->chan_idx, comp_update, ioat->last_seen);
722 while (RING_PHYS_ADDR(ioat, ioat->tail - 1) != status) {
723 desc = ioat_get_ring_entry(ioat, ioat->tail);
726 ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn,
733 ioat->tail++;
736 ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat));
739 ioat->last_seen = RING_PHYS_ADDR(ioat, ioat->tail - 1);
740 ioat->stats.descriptors_processed += completed;
741 wakeup(&ioat->tail);
745 ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
746 mtx_unlock(&ioat->cleanup_lock);
755 comp_update = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;
759 ioat->stats.channel_halts++;
765 mtx_lock(&ioat->submit_lock);
766 ioat->quiescing = TRUE;
767 mtx_unlock(&ioat->submit_lock);
776 mtx_lock(&ioat->cleanup_lock);
777 ioat->resetting_cleanup = TRUE;
779 chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
781 ioat_halted_debug(ioat, chanerr);
782 ioat->stats.last_halt_chanerr = chanerr;
784 while (ioat_get_active(ioat) > 0) {
785 desc = ioat_get_ring_entry(ioat, ioat->tail);
788 ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn,
795 ioat->tail++;
796 ioat->stats.descriptors_processed++;
797 ioat->stats.descriptors_error++;
800 ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat));
803 ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);
805 mtx_unlock(&ioat->cleanup_lock);
808 error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task);
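The completion-processing lines above (around 703-741 of ioat.c) walk the ring from tail until the bus address of slot tail-1 matches the completion address the hardware wrote into *comp_update. The following is a minimal stand-alone model of that walk; the base address, descriptor size, and slot values are made up for illustration and are not taken from the driver.

#include <stdint.h>
#include <stdio.h>

#define ORDER           3               /* 2^3 = 8 ring slots */
#define DESC_SIZE       64              /* bytes per hardware descriptor */
#define RING_BASE       0x100000u       /* pretend bus address of the ring */

/* Bus address of ring slot i, mirroring what RING_PHYS_ADDR() computes. */
static uint64_t
ring_phys_addr(uint32_t i)
{
        return (RING_BASE + (uint64_t)(i % (1u << ORDER)) * DESC_SIZE);
}

int
main(void)
{
        uint32_t tail = 0;
        /* Pretend the hardware reported slot 4 as the last completed one. */
        uint64_t status = ring_phys_addr(4);
        unsigned completed = 0;

        /* Advance tail until slot tail-1 is the reported completion. */
        while (ring_phys_addr(tail - 1) != status) {
                /* The driver would run this descriptor's callback here. */
                tail++;
                completed++;
        }
        printf("completed %u descriptors, tail now %u\n", completed, tail);
        return (0);
}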
816 struct ioat_softc *ioat;
819 ioat = ctx;
822 error = ioat_reset_hw(ioat);
840 struct ioat_softc *ioat;
849 (ioat = ioat_channel[index]) == NULL) {
853 mtx_lock(&ioat->submit_lock);
856 if (ioat->destroying) {
857 mtx_unlock(&ioat->submit_lock);
861 ioat_get(ioat);
862 if (ioat->quiescing) {
864 ioat_put(ioat);
865 mtx_unlock(&ioat->submit_lock);
869 while (ioat->quiescing && !ioat->destroying)
870 msleep(&ioat->quiescing, &ioat->submit_lock, 0, "getdma", 0);
872 if (ioat->destroying) {
873 ioat_put(ioat);
874 mtx_unlock(&ioat->submit_lock);
878 mtx_unlock(&ioat->submit_lock);
879 return (&ioat->dmaengine);
885 struct ioat_softc *ioat;
887 ioat = to_ioat_softc(dmaengine);
888 mtx_lock(&ioat->submit_lock);
889 ioat_put(ioat);
890 mtx_unlock(&ioat->submit_lock);
896 struct ioat_softc *ioat;
898 ioat = to_ioat_softc(dmaengine);
899 return (ioat->version);
905 struct ioat_softc *ioat;
907 ioat = to_ioat_softc(dmaengine);
908 return (ioat->max_xfer_size);
914 struct ioat_softc *ioat;
916 ioat = to_ioat_softc(dmaengine);
917 return (ioat->capabilities);
923 struct ioat_softc *ioat;
925 ioat = to_ioat_softc(dmaengine);
926 return (bus_get_domain(ioat->device, domain));
932 struct ioat_softc *ioat;
934 ioat = to_ioat_softc(dmaengine);
935 if (!ioat->intrdelay_supported)
937 if (delay > ioat->intrdelay_max)
940 ioat_write_2(ioat, IOAT_INTRDELAY_OFFSET, delay);
941 ioat->cached_intrdelay =
942 ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_US_MASK;
949 struct ioat_softc *ioat;
951 ioat = to_ioat_softc(dmaengine);
952 return (ioat->intrdelay_max);
958 struct ioat_softc *ioat;
960 ioat = to_ioat_softc(dmaengine);
961 mtx_lock(&ioat->submit_lock);
962 CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
963 ioat->acq_head = ioat->head;
969 struct ioat_softc *ioat;
972 ioat = to_ioat_softc(dmaengine);
975 error = ioat_reserve_space(ioat, n, mflags);
984 struct ioat_softc *ioat;
986 ioat = to_ioat_softc(dmaengine);
988 ioat->chan_idx, ioat->head);
991 ioat->chan_idx, ioat->head);
993 if (ioat->acq_head != ioat->head) {
994 ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET,
995 (uint16_t)ioat->head);
997 if (!callout_pending(&ioat->poll_timer)) {
998 callout_reset(&ioat->poll_timer, 1,
999 ioat_poll_timer_callback, ioat);
1002 mtx_unlock(&ioat->submit_lock);
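The ioat_get_dmaengine, ioat_acquire, ioat_get_max_nsegs/reserve, and ioat_release lines above form the public submission path described in ioat(4). Below is a hedged consumer-side sketch of that path; the header path, the flag values passed (0 and M_WAITOK), the error handling, and the choice of channel 0 are assumptions for illustration, not taken from this listing.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <machine/bus.h>

#include <dev/ioat/ioat.h>

static void
copy_done(void *arg, int error)
{
        /* Called from the driver's completion processing. */
        if (error != 0)
                printf("ioat: copy completed with error %d\n", error);
}

static int
copy_one_buffer(bus_addr_t dst, bus_addr_t src, bus_size_t len)
{
        bus_dmaengine_t dma;
        struct bus_dmadesc *desc;

        /* Take a reference on channel 0; NULL means no such channel. */
        dma = ioat_get_dmaengine(0, M_WAITOK);
        if (dma == NULL)
                return (ENXIO);

        ioat_acquire(dma);      /* serialize against other submitters */
        desc = ioat_copy(dma, dst, src, len, copy_done, NULL, 0);
        ioat_release(dma);      /* ring the doorbell (DMACOUNT) */

        /*
         * A longer-lived consumer would typically hold the reference while
         * it still has work outstanding; this sketch drops it right away.
         */
        ioat_put_dmaengine(dma);
        return (desc == NULL ? ENOMEM : 0);
}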
1006 ioat_op_generic(struct ioat_softc *ioat, uint8_t op,
1015 mtx_assert(&ioat->submit_lock, MA_OWNED);
1019 KASSERT(size <= ioat->max_xfer_size, ("%s: size too big (%u > %u)",
1020 __func__, (unsigned)size, ioat->max_xfer_size));
1027 if (ioat_reserve_space(ioat, 1, mflags) != 0)
1030 desc = ioat_get_ring_entry(ioat, ioat->head);
1031 hw_desc = &ioat_get_descriptor(ioat, ioat->head)->generic;
1057 struct ioat_softc *ioat;
1059 ioat = to_ioat_softc(dmaengine);
1060 CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
1062 desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn,
1067 hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
1069 ioat_submit_single(ioat);
1080 struct ioat_softc *ioat;
1082 ioat = to_ioat_softc(dmaengine);
1087 desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn,
1092 hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
1096 ioat_submit_single(ioat);
1098 __func__, ioat->chan_idx, &desc->bus_dmadesc, dst, src, len);
1109 struct ioat_softc *ioat;
1111 ioat = to_ioat_softc(dmaengine);
1112 CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
1119 desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, src1, dst1,
1124 hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
1137 ioat_submit_single(ioat);
1148 struct ioat_softc *ioat;
1152 ioat = to_ioat_softc(dmaengine);
1153 CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
1155 KASSERT((ioat->capabilities & IOAT_DMACAP_MOVECRC) != 0,
1182 desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn,
1187 hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32;
1202 ioat_submit_single(ioat);
1213 struct ioat_softc *ioat;
1217 ioat = to_ioat_softc(dmaengine);
1218 CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
1220 KASSERT((ioat->capabilities & IOAT_DMACAP_CRC) != 0,
1247 desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn,
1252 hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32;
1267 ioat_submit_single(ioat);
1278 struct ioat_softc *ioat;
1280 ioat = to_ioat_softc(dmaengine);
1281 CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
1283 KASSERT((ioat->capabilities & IOAT_DMACAP_BFILL) != 0,
1288 desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, fillpattern, dst,
1293 hw_desc = &ioat_get_descriptor(ioat, desc->id)->fill;
1297 ioat_submit_single(ioat);
1305 ioat_get_active(struct ioat_softc *ioat)
1308 return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1));
1312 ioat_get_ring_space(struct ioat_softc *ioat)
1315 return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1);
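The two helpers above (lines 1305-1315) account for ring occupancy using free-running head and tail counters masked by the power-of-two ring size, keeping one slot unused so a full ring is distinguishable from an empty one. The user-space sketch below restates that arithmetic in isolation; the names and example values are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

static uint32_t
ring_active(uint32_t head, uint32_t tail, unsigned order)
{
        /* Descriptors submitted (head) but not yet completed (tail). */
        return ((head - tail) & ((1u << order) - 1));
}

static uint32_t
ring_space(uint32_t head, uint32_t tail, unsigned order)
{
        /* One slot stays unused so full and empty are distinguishable. */
        return ((1u << order) - ring_active(head, tail, order) - 1);
}

int
main(void)
{
        /* 2^7 = 128 slots; head 105, tail 100 -> 5 active, 122 free. */
        printf("active=%u space=%u\n",
            ring_active(105, 100, 7), ring_space(105, 100, 7));
        return (0);
}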
1335 ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags)
1340 mtx_assert(&ioat->submit_lock, MA_OWNED);
1344 if (num_descs < 1 || num_descs >= (1 << ioat->ring_size_order)) {
1350 if (ioat->quiescing) {
1355 if (ioat_get_ring_space(ioat) >= num_descs)
1359 ioat->chan_idx, num_descs);
1361 if (!dug && !ioat->is_submitter_processing) {
1362 ioat->is_submitter_processing = TRUE;
1363 mtx_unlock(&ioat->submit_lock);
1366 __func__, ioat->chan_idx);
1367 ioat_process_events(ioat, FALSE);
1369 mtx_lock(&ioat->submit_lock);
1371 KASSERT(ioat->is_submitter_processing == TRUE,
1373 ioat->is_submitter_processing = FALSE;
1374 wakeup(&ioat->tail);
1383 __func__, ioat->chan_idx);
1384 msleep(&ioat->tail, &ioat->submit_lock, 0,
1390 mtx_assert(&ioat->submit_lock, MA_OWNED);
1391 KASSERT(!ioat->quiescing || error == ENXIO,
1397 ioat_free_ring(struct ioat_softc *ioat, uint32_t size,
1405 ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
1408 return (&ioat->ring[index % (1 << ioat->ring_size_order)]);
1412 ioat_get_descriptor(struct ioat_softc *ioat, uint32_t index)
1415 return (&ioat->hw_desc_ring[index % (1 << ioat->ring_size_order)]);
1419 ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr)
1428 mtx_assert(&ioat->cleanup_lock, MA_OWNED);
1430 desc = ioat_get_descriptor(ioat, ioat->tail + 0);
1433 desc = ioat_get_descriptor(ioat, ioat->tail + 1);
1440 struct ioat_softc *ioat;
1442 ioat = arg;
1445 ioat_process_events(ioat, FALSE);
1447 mtx_lock(&ioat->submit_lock);
1448 if (ioat_get_active(ioat) > 0)
1449 callout_schedule(&ioat->poll_timer, 1);
1450 mtx_unlock(&ioat->submit_lock);
1457 ioat_submit_single(struct ioat_softc *ioat)
1460 mtx_assert(&ioat->submit_lock, MA_OWNED);
1462 ioat->head++;
1464 ioat->chan_idx, ioat->head, ioat->tail);
1466 ioat->stats.descriptors_submitted++;
1470 ioat_reset_hw(struct ioat_softc *ioat)
1477 CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
1479 mtx_lock(&ioat->submit_lock);
1480 while (ioat->resetting && !ioat->destroying)
1481 msleep(&ioat->resetting, &ioat->submit_lock, 0, "IRH_drain", 0);
1482 if (ioat->destroying) {
1483 mtx_unlock(&ioat->submit_lock);
1486 ioat->resetting = TRUE;
1487 ioat->quiescing = TRUE;
1488 mtx_unlock(&ioat->submit_lock);
1489 mtx_lock(&ioat->cleanup_lock);
1490 while (ioat_get_active(ioat) > 0)
1491 msleep(&ioat->tail, &ioat->cleanup_lock, 0, "ioat_drain", 1);
1497 ioat->resetting_cleanup = TRUE;
1498 mtx_unlock(&ioat->cleanup_lock);
1501 ioat->chan_idx);
1503 status = ioat_get_chansts(ioat);
1505 ioat_suspend(ioat);
1511 status = ioat_get_chansts(ioat);
1518 KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce"));
1520 chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
1521 ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);
1524 ioat->chan_idx);
1530 pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
1532 chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
1533 pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);
1539 if (ioat_model_resets_msix(ioat)) {
1541 pci_save_state(ioat->device);
1544 ioat_reset(ioat);
1546 ioat->chan_idx);
1549 for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
1556 if (ioat_model_resets_msix(ioat)) {
1558 pci_restore_state(ioat->device);
1562 status = ioat_get_chansts(ioat);
1566 ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
1571 chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
1573 mtx_lock(&ioat->cleanup_lock);
1574 ioat_halted_debug(ioat, chanerr);
1575 mtx_unlock(&ioat->cleanup_lock);
1587 ioat->tail = ioat->head = 0;
1588 *ioat->comp_update = ioat->last_seen =
1589 RING_PHYS_ADDR(ioat, ioat->tail - 1);
1591 ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
1592 ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
1593 ioat_write_chainaddr(ioat, RING_PHYS_ADDR(ioat, 0));
1596 ioat->chan_idx);
1601 error = ioat_start_channel(ioat);
1603 ioat->chan_idx);
1609 mtx_lock(&ioat->cleanup_lock);
1610 ioat->resetting_cleanup = FALSE;
1611 mtx_unlock(&ioat->cleanup_lock);
1614 mtx_lock(&ioat->submit_lock);
1615 ioat->quiescing = FALSE;
1616 wakeup(&ioat->quiescing);
1618 ioat->resetting = FALSE;
1619 wakeup(&ioat->resetting);
1621 CTR2(KTR_IOAT, "%s channel=%u reset done", __func__, ioat->chan_idx);
1622 mtx_unlock(&ioat->submit_lock);
1630 struct ioat_softc *ioat;
1635 ioat = arg1;
1637 status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;
1671 struct ioat_softc *ioat;
1678 ioat = arg1;
1681 if (ioat->stats.interrupts == 0) {
1685 rate = ioat->stats.descriptors_processed * factor /
1686 ioat->stats.interrupts;
1701 struct ioat_softc *ioat;
1704 ioat = arg1;
1716 error = ioat_reset_hw(ioat);
1739 struct ioat_softc *ioat;
1741 ioat = DEVICE2SOFTC(device);
1747 &ioat->version, 0, "HW version (0xMM form)");
1749 &ioat->max_xfer_size, 0, "HW maximum transfer size");
1751 &ioat->intrdelay_supported, 0, "Is INTRDELAY supported");
1753 &ioat->intrdelay_max, 0,
1761 &ioat->ring_size_order, 0, "SW descriptor ring size order");
1762 SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head,
1764 SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail,
1768 ioat->comp_update, "HW addr of last completion");
1771 CTLFLAG_RD, &ioat->is_submitter_processing, 0,
1775 CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A",
1779 &ioat->cached_intrdelay, 0,
1787 CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I",
1795 &ioat->stats.interrupts,
1798 &ioat->stats.descriptors_processed,
1801 &ioat->stats.descriptors_submitted,
1804 &ioat->stats.descriptors_error,
1807 &ioat->stats.channel_halts, 0,
1810 &ioat->stats.last_halt_chanerr, 0,
1814 CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A",
1819 ioat_get(struct ioat_softc *ioat)
1822 mtx_assert(&ioat->submit_lock, MA_OWNED);
1823 KASSERT(ioat->refcnt < UINT32_MAX, ("refcnt overflow"));
1825 ioat->refcnt++;
1829 ioat_put(struct ioat_softc *ioat)
1832 mtx_assert(&ioat->submit_lock, MA_OWNED);
1833 KASSERT(ioat->refcnt >= 1, ("refcnt error"));
1835 if (--ioat->refcnt == 0)
1836 wakeup(&ioat->refcnt);
1840 ioat_drain_locked(struct ioat_softc *ioat)
1843 mtx_assert(&ioat->submit_lock, MA_OWNED);
1845 while (ioat->refcnt > 0)
1846 msleep(&ioat->refcnt, &ioat->submit_lock, 0, "ioat_drain", 0);
1852 DB_SHOW_COMMAND(ioat, db_show_ioat)
1864 db_printf("ioat softc at %p\n", sc);
1962 db_printf("usage: show ioat <0-%u>\n", ioat_channel_index);