1287117Scem/*- 2287117Scem * Copyright (C) 2012 Intel Corporation 3287117Scem * All rights reserved. 4344650Smav * Copyright (C) 2018 Alexander Motin <mav@FreeBSD.org> 5287117Scem * 6287117Scem * Redistribution and use in source and binary forms, with or without 7287117Scem * modification, are permitted provided that the following conditions 8287117Scem * are met: 9287117Scem * 1. Redistributions of source code must retain the above copyright 10287117Scem * notice, this list of conditions and the following disclaimer. 11287117Scem * 2. Redistributions in binary form must reproduce the above copyright 12287117Scem * notice, this list of conditions and the following disclaimer in the 13287117Scem * documentation and/or other materials provided with the distribution. 14287117Scem * 15287117Scem * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16287117Scem * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17287117Scem * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18287117Scem * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19287117Scem * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20287117Scem * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21287117Scem * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22287117Scem * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23287117Scem * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24287117Scem * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25287117Scem * SUCH DAMAGE. 
26287117Scem */ 27287117Scem 28287117Scem#include <sys/cdefs.h> 29287117Scem__FBSDID("$FreeBSD: stable/11/sys/dev/ioat/ioat.c 356430 2020-01-07 00:54:45Z mav $"); 30287117Scem 31301712Scem#include "opt_ddb.h" 32301712Scem 33287117Scem#include <sys/param.h> 34287117Scem#include <sys/systm.h> 35287117Scem#include <sys/bus.h> 36287117Scem#include <sys/conf.h> 37344401Smav#include <sys/fail.h> 38287117Scem#include <sys/ioccom.h> 39287117Scem#include <sys/kernel.h> 40287117Scem#include <sys/lock.h> 41287117Scem#include <sys/malloc.h> 42287117Scem#include <sys/module.h> 43287117Scem#include <sys/mutex.h> 44287117Scem#include <sys/rman.h> 45290229Scem#include <sys/sbuf.h> 46287117Scem#include <sys/sysctl.h> 47295605Scem#include <sys/taskqueue.h> 48287117Scem#include <sys/time.h> 49287117Scem#include <dev/pci/pcireg.h> 50287117Scem#include <dev/pci/pcivar.h> 51287117Scem#include <machine/bus.h> 52287117Scem#include <machine/resource.h> 53287117Scem#include <machine/stdarg.h> 54287117Scem 55301712Scem#ifdef DDB 56301712Scem#include <ddb/ddb.h> 57301712Scem#endif 58301712Scem 59287117Scem#include "ioat.h" 60287117Scem#include "ioat_hw.h" 61287117Scem#include "ioat_internal.h" 62287117Scem 63298987Scem#ifndef BUS_SPACE_MAXADDR_40BIT 64298987Scem#define BUS_SPACE_MAXADDR_40BIT 0xFFFFFFFFFFULL 65298987Scem#endif 66289904Scem 67287117Scemstatic int ioat_probe(device_t device); 68287117Scemstatic int ioat_attach(device_t device); 69287117Scemstatic int ioat_detach(device_t device); 70287403Scemstatic int ioat_setup_intr(struct ioat_softc *ioat); 71287403Scemstatic int ioat_teardown_intr(struct ioat_softc *ioat); 72287117Scemstatic int ioat3_attach(device_t device); 73289912Scemstatic int ioat_start_channel(struct ioat_softc *ioat); 74287117Scemstatic int ioat_map_pci_bar(struct ioat_softc *ioat); 75287117Scemstatic void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, 76287117Scem int error); 77287117Scemstatic void ioat_interrupt_handler(void *arg); 
78287414Scemstatic boolean_t ioat_model_resets_msix(struct ioat_softc *ioat); 79290229Scemstatic int chanerr_to_errno(uint32_t); 80344650Smavstatic void ioat_process_events(struct ioat_softc *ioat, boolean_t intr); 81287117Scemstatic inline uint32_t ioat_get_active(struct ioat_softc *ioat); 82287117Scemstatic inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat); 83289982Scemstatic void ioat_free_ring(struct ioat_softc *, uint32_t size, 84344401Smav struct ioat_descriptor *); 85289982Scemstatic int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags); 86344401Smavstatic union ioat_hw_descriptor *ioat_get_descriptor(struct ioat_softc *, 87287117Scem uint32_t index); 88344401Smavstatic struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *, 89344401Smav uint32_t index); 90290229Scemstatic void ioat_halted_debug(struct ioat_softc *, uint32_t); 91302352Scemstatic void ioat_poll_timer_callback(void *arg); 92287117Scemstatic void dump_descriptor(void *hw_desc); 93287117Scemstatic void ioat_submit_single(struct ioat_softc *ioat); 94287117Scemstatic void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, 95287117Scem int error); 96287117Scemstatic int ioat_reset_hw(struct ioat_softc *ioat); 97295605Scemstatic void ioat_reset_hw_task(void *, int); 98287117Scemstatic void ioat_setup_sysctl(device_t device); 99289908Scemstatic int sysctl_handle_reset(SYSCTL_HANDLER_ARGS); 100344650Smavstatic void ioat_get(struct ioat_softc *); 101344650Smavstatic void ioat_put(struct ioat_softc *); 102290131Scemstatic void ioat_drain_locked(struct ioat_softc *); 103287117Scem 104289776Scem#define ioat_log_message(v, ...) 
do { \ 105289776Scem if ((v) <= g_ioat_debug_level) { \ 106289776Scem device_printf(ioat->device, __VA_ARGS__); \ 107289776Scem } \ 108289776Scem} while (0) 109289776Scem 110287117ScemMALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations"); 111287117ScemSYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node"); 112287117Scem 113287117Scemstatic int g_force_legacy_interrupts; 114287117ScemSYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN, 115287117Scem &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled"); 116287117Scem 117289776Scemint g_ioat_debug_level = 0; 118287117ScemSYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level, 119287117Scem 0, "Set log level (0-3) for ioat(4). Higher is more verbose."); 120287117Scem 121344401Smavunsigned g_ioat_ring_order = 13; 122344401SmavSYSCTL_UINT(_hw_ioat, OID_AUTO, ring_order, CTLFLAG_RDTUN, &g_ioat_ring_order, 123344401Smav 0, "Set IOAT ring order. (1 << this) == ring size."); 124344401Smav 125287117Scem/* 126287117Scem * OS <-> Driver interface structures 127287117Scem */ 128287117Scemstatic device_method_t ioat_pci_methods[] = { 129287117Scem /* Device interface */ 130287117Scem DEVMETHOD(device_probe, ioat_probe), 131287117Scem DEVMETHOD(device_attach, ioat_attach), 132287117Scem DEVMETHOD(device_detach, ioat_detach), 133299015Sngie DEVMETHOD_END 134287117Scem}; 135287117Scem 136287117Scemstatic driver_t ioat_pci_driver = { 137287117Scem "ioat", 138287117Scem ioat_pci_methods, 139287117Scem sizeof(struct ioat_softc), 140287117Scem}; 141287117Scem 142287117Scemstatic devclass_t ioat_devclass; 143287117ScemDRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0); 144291826ScemMODULE_VERSION(ioat, 1); 145287117Scem 146287117Scem/* 147287117Scem * Private data structures 148287117Scem */ 149287117Scemstatic struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS]; 150301296Scemstatic unsigned ioat_channel_index = 0; 151301296ScemSYSCTL_UINT(_hw_ioat, 
OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0, 152287117Scem "Number of IOAT channels attached"); 153344650Smavstatic struct mtx ioat_list_mtx; 154344650SmavMTX_SYSINIT(ioat_list_mtx, &ioat_list_mtx, "ioat list mtx", MTX_DEF); 155287117Scem 156287117Scemstatic struct _pcsid 157287117Scem{ 158287117Scem u_int32_t type; 159287117Scem const char *desc; 160287117Scem} pci_ids[] = { 161287117Scem { 0x34308086, "TBG IOAT Ch0" }, 162287117Scem { 0x34318086, "TBG IOAT Ch1" }, 163287117Scem { 0x34328086, "TBG IOAT Ch2" }, 164287117Scem { 0x34338086, "TBG IOAT Ch3" }, 165287117Scem { 0x34298086, "TBG IOAT Ch4" }, 166287117Scem { 0x342a8086, "TBG IOAT Ch5" }, 167287117Scem { 0x342b8086, "TBG IOAT Ch6" }, 168287117Scem { 0x342c8086, "TBG IOAT Ch7" }, 169287117Scem 170287117Scem { 0x37108086, "JSF IOAT Ch0" }, 171287117Scem { 0x37118086, "JSF IOAT Ch1" }, 172287117Scem { 0x37128086, "JSF IOAT Ch2" }, 173287117Scem { 0x37138086, "JSF IOAT Ch3" }, 174287117Scem { 0x37148086, "JSF IOAT Ch4" }, 175287117Scem { 0x37158086, "JSF IOAT Ch5" }, 176287117Scem { 0x37168086, "JSF IOAT Ch6" }, 177287117Scem { 0x37178086, "JSF IOAT Ch7" }, 178287117Scem { 0x37188086, "JSF IOAT Ch0 (RAID)" }, 179287117Scem { 0x37198086, "JSF IOAT Ch1 (RAID)" }, 180287117Scem 181287117Scem { 0x3c208086, "SNB IOAT Ch0" }, 182287117Scem { 0x3c218086, "SNB IOAT Ch1" }, 183287117Scem { 0x3c228086, "SNB IOAT Ch2" }, 184287117Scem { 0x3c238086, "SNB IOAT Ch3" }, 185287117Scem { 0x3c248086, "SNB IOAT Ch4" }, 186287117Scem { 0x3c258086, "SNB IOAT Ch5" }, 187287117Scem { 0x3c268086, "SNB IOAT Ch6" }, 188287117Scem { 0x3c278086, "SNB IOAT Ch7" }, 189287117Scem { 0x3c2e8086, "SNB IOAT Ch0 (RAID)" }, 190287117Scem { 0x3c2f8086, "SNB IOAT Ch1 (RAID)" }, 191287117Scem 192287117Scem { 0x0e208086, "IVB IOAT Ch0" }, 193287117Scem { 0x0e218086, "IVB IOAT Ch1" }, 194287117Scem { 0x0e228086, "IVB IOAT Ch2" }, 195287117Scem { 0x0e238086, "IVB IOAT Ch3" }, 196287117Scem { 0x0e248086, "IVB IOAT Ch4" }, 197287117Scem { 
0x0e258086, "IVB IOAT Ch5" }, 198287117Scem { 0x0e268086, "IVB IOAT Ch6" }, 199287117Scem { 0x0e278086, "IVB IOAT Ch7" }, 200287117Scem { 0x0e2e8086, "IVB IOAT Ch0 (RAID)" }, 201287117Scem { 0x0e2f8086, "IVB IOAT Ch1 (RAID)" }, 202287117Scem 203287117Scem { 0x2f208086, "HSW IOAT Ch0" }, 204287117Scem { 0x2f218086, "HSW IOAT Ch1" }, 205287117Scem { 0x2f228086, "HSW IOAT Ch2" }, 206287117Scem { 0x2f238086, "HSW IOAT Ch3" }, 207287117Scem { 0x2f248086, "HSW IOAT Ch4" }, 208287117Scem { 0x2f258086, "HSW IOAT Ch5" }, 209287117Scem { 0x2f268086, "HSW IOAT Ch6" }, 210287117Scem { 0x2f278086, "HSW IOAT Ch7" }, 211287117Scem { 0x2f2e8086, "HSW IOAT Ch0 (RAID)" }, 212287117Scem { 0x2f2f8086, "HSW IOAT Ch1 (RAID)" }, 213287117Scem 214287117Scem { 0x0c508086, "BWD IOAT Ch0" }, 215287117Scem { 0x0c518086, "BWD IOAT Ch1" }, 216287117Scem { 0x0c528086, "BWD IOAT Ch2" }, 217287117Scem { 0x0c538086, "BWD IOAT Ch3" }, 218287117Scem 219287117Scem { 0x6f508086, "BDXDE IOAT Ch0" }, 220287117Scem { 0x6f518086, "BDXDE IOAT Ch1" }, 221287117Scem { 0x6f528086, "BDXDE IOAT Ch2" }, 222287117Scem { 0x6f538086, "BDXDE IOAT Ch3" }, 223287117Scem 224292032Scem { 0x6f208086, "BDX IOAT Ch0" }, 225292032Scem { 0x6f218086, "BDX IOAT Ch1" }, 226292032Scem { 0x6f228086, "BDX IOAT Ch2" }, 227292032Scem { 0x6f238086, "BDX IOAT Ch3" }, 228292032Scem { 0x6f248086, "BDX IOAT Ch4" }, 229292032Scem { 0x6f258086, "BDX IOAT Ch5" }, 230292032Scem { 0x6f268086, "BDX IOAT Ch6" }, 231292032Scem { 0x6f278086, "BDX IOAT Ch7" }, 232292032Scem { 0x6f2e8086, "BDX IOAT Ch0 (RAID)" }, 233292032Scem { 0x6f2f8086, "BDX IOAT Ch1 (RAID)" }, 234330450Seadler { 0x20218086, "SKX IOAT" }, 235330450Seadler { 0x00000000, NULL } 236287117Scem}; 237287117Scem 238287117Scem/* 239287117Scem * OS <-> Driver linkage functions 240287117Scem */ 241287117Scemstatic int 242287117Scemioat_probe(device_t device) 243287117Scem{ 244287117Scem struct _pcsid *ep; 245287117Scem u_int32_t type; 246287117Scem 247287117Scem type = 
pci_get_devid(device); 248287117Scem for (ep = pci_ids; ep->type; ep++) { 249287117Scem if (ep->type == type) { 250287117Scem device_set_desc(device, ep->desc); 251287117Scem return (0); 252287117Scem } 253287117Scem } 254287117Scem return (ENXIO); 255287117Scem} 256287117Scem 257287117Scemstatic int 258287117Scemioat_attach(device_t device) 259287117Scem{ 260287117Scem struct ioat_softc *ioat; 261344650Smav int error, i; 262287117Scem 263287117Scem ioat = DEVICE2SOFTC(device); 264287117Scem ioat->device = device; 265287117Scem 266287117Scem error = ioat_map_pci_bar(ioat); 267287117Scem if (error != 0) 268287117Scem goto err; 269287117Scem 270287117Scem ioat->version = ioat_read_cbver(ioat); 271287117Scem if (ioat->version < IOAT_VER_3_0) { 272287117Scem error = ENODEV; 273287117Scem goto err; 274287117Scem } 275287117Scem 276287117Scem error = ioat3_attach(device); 277287117Scem if (error != 0) 278287117Scem goto err; 279287117Scem 280287117Scem error = pci_enable_busmaster(device); 281287117Scem if (error != 0) 282287117Scem goto err; 283287117Scem 284289907Scem error = ioat_setup_intr(ioat); 285289907Scem if (error != 0) 286289907Scem goto err; 287289907Scem 288289912Scem error = ioat_reset_hw(ioat); 289289760Scem if (error != 0) 290289907Scem goto err; 291289760Scem 292344650Smav ioat_process_events(ioat, FALSE); 293289760Scem ioat_setup_sysctl(device); 294289760Scem 295344650Smav mtx_lock(&ioat_list_mtx); 296344650Smav for (i = 0; i < IOAT_MAX_CHANNELS; i++) { 297344650Smav if (ioat_channel[i] == NULL) 298344650Smav break; 299344650Smav } 300344650Smav if (i >= IOAT_MAX_CHANNELS) { 301344650Smav mtx_unlock(&ioat_list_mtx); 302344650Smav device_printf(device, "Too many I/OAT devices in system\n"); 303344650Smav error = ENXIO; 304344650Smav goto err; 305344650Smav } 306344650Smav ioat->chan_idx = i; 307344650Smav ioat_channel[i] = ioat; 308344650Smav if (i >= ioat_channel_index) 309344650Smav ioat_channel_index = i + 1; 310344650Smav mtx_unlock(&ioat_list_mtx); 
311344650Smav 312289760Scem ioat_test_attach(); 313287117Scem 314287117Scemerr: 315287117Scem if (error != 0) 316287117Scem ioat_detach(device); 317287117Scem return (error); 318287117Scem} 319287117Scem 320287117Scemstatic int 321287117Scemioat_detach(device_t device) 322287117Scem{ 323287117Scem struct ioat_softc *ioat; 324287117Scem 325287117Scem ioat = DEVICE2SOFTC(device); 326289760Scem 327344650Smav mtx_lock(&ioat_list_mtx); 328344650Smav ioat_channel[ioat->chan_idx] = NULL; 329344650Smav while (ioat_channel_index > 0 && 330344650Smav ioat_channel[ioat_channel_index - 1] == NULL) 331344650Smav ioat_channel_index--; 332344650Smav mtx_unlock(&ioat_list_mtx); 333344650Smav 334289760Scem ioat_test_detach(); 335295605Scem taskqueue_drain(taskqueue_thread, &ioat->reset_task); 336289904Scem 337344650Smav mtx_lock(&ioat->submit_lock); 338290131Scem ioat->quiescing = TRUE; 339297746Scem ioat->destroying = TRUE; 340297746Scem wakeup(&ioat->quiescing); 341302353Scem wakeup(&ioat->resetting); 342297746Scem 343290131Scem ioat_drain_locked(ioat); 344344650Smav mtx_unlock(&ioat->submit_lock); 345344650Smav mtx_lock(&ioat->cleanup_lock); 346344650Smav while (ioat_get_active(ioat) > 0) 347344650Smav msleep(&ioat->tail, &ioat->cleanup_lock, 0, "ioat_drain", 1); 348344650Smav mtx_unlock(&ioat->cleanup_lock); 349290131Scem 350289904Scem ioat_teardown_intr(ioat); 351302352Scem callout_drain(&ioat->poll_timer); 352287117Scem 353287117Scem pci_disable_busmaster(device); 354287117Scem 355287117Scem if (ioat->pci_resource != NULL) 356287117Scem bus_release_resource(device, SYS_RES_MEMORY, 357287117Scem ioat->pci_resource_id, ioat->pci_resource); 358287117Scem 359289982Scem if (ioat->ring != NULL) 360289982Scem ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring); 361287117Scem 362287117Scem if (ioat->comp_update != NULL) { 363287117Scem bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map); 364287117Scem bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update, 
365287117Scem ioat->comp_update_map); 366287117Scem bus_dma_tag_destroy(ioat->comp_update_tag); 367287117Scem } 368287117Scem 369344401Smav if (ioat->hw_desc_ring != NULL) { 370344401Smav bus_dmamap_unload(ioat->hw_desc_tag, ioat->hw_desc_map); 371344401Smav bus_dmamem_free(ioat->hw_desc_tag, ioat->hw_desc_ring, 372344401Smav ioat->hw_desc_map); 373344401Smav bus_dma_tag_destroy(ioat->hw_desc_tag); 374344401Smav } 375287117Scem 376287403Scem return (0); 377287403Scem} 378287403Scem 379287403Scemstatic int 380287403Scemioat_teardown_intr(struct ioat_softc *ioat) 381287403Scem{ 382287403Scem 383287117Scem if (ioat->tag != NULL) 384287403Scem bus_teardown_intr(ioat->device, ioat->res, ioat->tag); 385287117Scem 386287117Scem if (ioat->res != NULL) 387287403Scem bus_release_resource(ioat->device, SYS_RES_IRQ, 388287117Scem rman_get_rid(ioat->res), ioat->res); 389287117Scem 390287403Scem pci_release_msi(ioat->device); 391287117Scem return (0); 392287117Scem} 393287117Scem 394287117Scemstatic int 395289912Scemioat_start_channel(struct ioat_softc *ioat) 396287117Scem{ 397302354Scem struct ioat_dma_hw_descriptor *hw_desc; 398302354Scem struct ioat_descriptor *desc; 399302354Scem struct bus_dmadesc *dmadesc; 400287117Scem uint64_t status; 401287117Scem uint32_t chanerr; 402287117Scem int i; 403287117Scem 404287117Scem ioat_acquire(&ioat->dmaengine); 405302354Scem 406302354Scem /* Submit 'NULL' operation manually to avoid quiescing flag */ 407302354Scem desc = ioat_get_ring_entry(ioat, ioat->head); 408344401Smav hw_desc = &ioat_get_descriptor(ioat, ioat->head)->dma; 409302354Scem dmadesc = &desc->bus_dmadesc; 410302354Scem 411302354Scem dmadesc->callback_fn = NULL; 412302354Scem dmadesc->callback_arg = NULL; 413302354Scem 414302354Scem hw_desc->u.control_raw = 0; 415302354Scem hw_desc->u.control_generic.op = IOAT_OP_COPY; 416302354Scem hw_desc->u.control_generic.completion_update = 1; 417302354Scem hw_desc->size = 8; 418302354Scem hw_desc->src_addr = 0; 419302354Scem 
hw_desc->dest_addr = 0; 420302354Scem hw_desc->u.control.null = 1; 421302354Scem 422302354Scem ioat_submit_single(ioat); 423287117Scem ioat_release(&ioat->dmaengine); 424287117Scem 425287117Scem for (i = 0; i < 100; i++) { 426287117Scem DELAY(1); 427287117Scem status = ioat_get_chansts(ioat); 428287117Scem if (is_ioat_idle(status)) 429287117Scem return (0); 430287117Scem } 431287117Scem 432287117Scem chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); 433287117Scem ioat_log_message(0, "could not start channel: " 434289983Scem "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr, 435289983Scem IOAT_CHANERR_STR); 436287117Scem return (ENXIO); 437287117Scem} 438287117Scem 439287117Scem/* 440287117Scem * Initialize Hardware 441287117Scem */ 442287117Scemstatic int 443287117Scemioat3_attach(device_t device) 444287117Scem{ 445287117Scem struct ioat_softc *ioat; 446344401Smav struct ioat_descriptor *ring; 447287117Scem struct ioat_dma_hw_descriptor *dma_hw_desc; 448344401Smav void *hw_desc; 449344401Smav size_t ringsz; 450287117Scem int i, num_descriptors; 451287117Scem int error; 452287117Scem uint8_t xfercap; 453287117Scem 454287117Scem error = 0; 455287117Scem ioat = DEVICE2SOFTC(device); 456290087Scem ioat->capabilities = ioat_read_dmacapability(ioat); 457287117Scem 458301300Scem ioat_log_message(0, "Capabilities: %b\n", (int)ioat->capabilities, 459290087Scem IOAT_DMACAP_STR); 460290087Scem 461287117Scem xfercap = ioat_read_xfercap(ioat); 462287117Scem ioat->max_xfer_size = 1 << xfercap; 463287117Scem 464292228Scem ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & 465292228Scem IOAT_INTRDELAY_SUPPORTED) != 0; 466292228Scem if (ioat->intrdelay_supported) 467292228Scem ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK; 468292228Scem 469287117Scem /* TODO: need to check DCA here if we ever do XOR/PQ */ 470287117Scem 471287117Scem mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF); 472290229Scem mtx_init(&ioat->cleanup_lock, "ioat_cleanup", 
NULL, MTX_DEF); 473302352Scem callout_init(&ioat->poll_timer, 1); 474295605Scem TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat); 475287117Scem 476290229Scem /* Establish lock order for Witness */ 477344650Smav mtx_lock(&ioat->cleanup_lock); 478290229Scem mtx_lock(&ioat->submit_lock); 479344650Smav mtx_unlock(&ioat->submit_lock); 480290229Scem mtx_unlock(&ioat->cleanup_lock); 481290229Scem 482344401Smav ioat->is_submitter_processing = FALSE; 483287117Scem 484287117Scem bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0, 485287117Scem BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 486287117Scem sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL, 487287117Scem &ioat->comp_update_tag); 488287117Scem 489287117Scem error = bus_dmamem_alloc(ioat->comp_update_tag, 490355154Smav (void **)&ioat->comp_update, BUS_DMA_ZERO | BUS_DMA_WAITOK, 491355154Smav &ioat->comp_update_map); 492287117Scem if (ioat->comp_update == NULL) 493287117Scem return (ENOMEM); 494287117Scem 495287117Scem error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map, 496287117Scem ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat, 497355154Smav BUS_DMA_NOWAIT); 498287117Scem if (error != 0) 499287117Scem return (error); 500287117Scem 501344401Smav ioat->ring_size_order = g_ioat_ring_order; 502287117Scem num_descriptors = 1 << ioat->ring_size_order; 503344401Smav ringsz = sizeof(struct ioat_dma_hw_descriptor) * num_descriptors; 504287117Scem 505344401Smav error = bus_dma_tag_create(bus_get_dma_tag(ioat->device), 506344401Smav 2 * 1024 * 1024, 0x0, (bus_addr_t)BUS_SPACE_MAXADDR_40BIT, 507344401Smav BUS_SPACE_MAXADDR, NULL, NULL, ringsz, 1, ringsz, 0, NULL, NULL, 508287117Scem &ioat->hw_desc_tag); 509344401Smav if (error != 0) 510344401Smav return (error); 511287117Scem 512344401Smav error = bus_dmamem_alloc(ioat->hw_desc_tag, &hw_desc, 513344401Smav BUS_DMA_ZERO | BUS_DMA_WAITOK, &ioat->hw_desc_map); 514344401Smav if (error != 0) 515344401Smav 
return (error); 516344401Smav 517344401Smav error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc, 518355154Smav ringsz, ioat_dmamap_cb, &ioat->hw_desc_bus_addr, BUS_DMA_NOWAIT); 519344401Smav if (error) 520344401Smav return (error); 521344401Smav 522344401Smav ioat->hw_desc_ring = hw_desc; 523344401Smav 524287117Scem ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT, 525289982Scem M_ZERO | M_WAITOK); 526287117Scem 527287117Scem ring = ioat->ring; 528287117Scem for (i = 0; i < num_descriptors; i++) { 529344401Smav memset(&ring[i].bus_dmadesc, 0, sizeof(ring[i].bus_dmadesc)); 530344401Smav ring[i].id = i; 531287117Scem } 532287117Scem 533344401Smav for (i = 0; i < num_descriptors; i++) { 534344401Smav dma_hw_desc = &ioat->hw_desc_ring[i].dma; 535344401Smav dma_hw_desc->next = RING_PHYS_ADDR(ioat, i + 1); 536287117Scem } 537287117Scem 538355196Smav ioat->tail = ioat->head = 0; 539355196Smav *ioat->comp_update = ioat->last_seen = 540355196Smav RING_PHYS_ADDR(ioat, ioat->tail - 1); 541287117Scem return (0); 542287117Scem} 543287117Scem 544287117Scemstatic int 545287117Scemioat_map_pci_bar(struct ioat_softc *ioat) 546287117Scem{ 547287117Scem 548287117Scem ioat->pci_resource_id = PCIR_BAR(0); 549289911Scem ioat->pci_resource = bus_alloc_resource_any(ioat->device, 550289911Scem SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE); 551287117Scem 552287117Scem if (ioat->pci_resource == NULL) { 553287117Scem ioat_log_message(0, "unable to allocate pci resource\n"); 554287117Scem return (ENODEV); 555287117Scem } 556287117Scem 557287117Scem ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource); 558287117Scem ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource); 559287117Scem return (0); 560287117Scem} 561287117Scem 562287117Scemstatic void 563287117Scemioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) 564287117Scem{ 565287117Scem struct ioat_softc *ioat = arg; 566287117Scem 567289912Scem KASSERT(error == 0, ("%s: 
error:%d", __func__, error)); 568287117Scem ioat->comp_update_bus_addr = seg[0].ds_addr; 569287117Scem} 570287117Scem 571287117Scemstatic void 572287117Scemioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 573287117Scem{ 574287117Scem bus_addr_t *baddr; 575287117Scem 576289912Scem KASSERT(error == 0, ("%s: error:%d", __func__, error)); 577287117Scem baddr = arg; 578287117Scem *baddr = segs->ds_addr; 579287117Scem} 580287117Scem 581287117Scem/* 582287117Scem * Interrupt setup and handlers 583287117Scem */ 584287117Scemstatic int 585287403Scemioat_setup_intr(struct ioat_softc *ioat) 586287117Scem{ 587287117Scem uint32_t num_vectors; 588287117Scem int error; 589287117Scem boolean_t use_msix; 590287117Scem boolean_t force_legacy_interrupts; 591287117Scem 592287117Scem use_msix = FALSE; 593287117Scem force_legacy_interrupts = FALSE; 594287117Scem 595287117Scem if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) { 596287117Scem num_vectors = 1; 597287117Scem pci_alloc_msix(ioat->device, &num_vectors); 598287117Scem if (num_vectors == 1) 599287117Scem use_msix = TRUE; 600287117Scem } 601287117Scem 602287117Scem if (use_msix) { 603287117Scem ioat->rid = 1; 604287117Scem ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ, 605287117Scem &ioat->rid, RF_ACTIVE); 606287117Scem } else { 607287117Scem ioat->rid = 0; 608287117Scem ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ, 609287117Scem &ioat->rid, RF_SHAREABLE | RF_ACTIVE); 610287117Scem } 611287117Scem if (ioat->res == NULL) { 612287117Scem ioat_log_message(0, "bus_alloc_resource failed\n"); 613287117Scem return (ENOMEM); 614287117Scem } 615287117Scem 616287117Scem ioat->tag = NULL; 617287117Scem error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE | 618287117Scem INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag); 619287117Scem if (error != 0) { 620287117Scem ioat_log_message(0, "bus_setup_intr failed\n"); 621287117Scem return (error); 
622287117Scem } 623287117Scem 624287117Scem ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN); 625287117Scem return (0); 626287117Scem} 627287117Scem 628287403Scemstatic boolean_t 629287414Scemioat_model_resets_msix(struct ioat_softc *ioat) 630287403Scem{ 631287403Scem u_int32_t pciid; 632287403Scem 633287403Scem pciid = pci_get_devid(ioat->device); 634287403Scem switch (pciid) { 635287414Scem /* BWD: */ 636287414Scem case 0x0c508086: 637287414Scem case 0x0c518086: 638287414Scem case 0x0c528086: 639287414Scem case 0x0c538086: 640287414Scem /* BDXDE: */ 641287403Scem case 0x6f508086: 642287403Scem case 0x6f518086: 643287403Scem case 0x6f528086: 644287403Scem case 0x6f538086: 645287403Scem return (TRUE); 646287403Scem } 647287403Scem 648287403Scem return (FALSE); 649287403Scem} 650287403Scem 651287117Scemstatic void 652287117Scemioat_interrupt_handler(void *arg) 653287117Scem{ 654287117Scem struct ioat_softc *ioat = arg; 655287117Scem 656292226Scem ioat->stats.interrupts++; 657344650Smav ioat_process_events(ioat, TRUE); 658287117Scem} 659287117Scem 660290229Scemstatic int 661290229Scemchanerr_to_errno(uint32_t chanerr) 662290229Scem{ 663290229Scem 664290229Scem if (chanerr == 0) 665290229Scem return (0); 666290229Scem if ((chanerr & (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0) 667290229Scem return (EFAULT); 668290229Scem if ((chanerr & (IOAT_CHANERR_RDERR | IOAT_CHANERR_WDERR)) != 0) 669290229Scem return (EIO); 670290229Scem /* This one is probably our fault: */ 671290229Scem if ((chanerr & IOAT_CHANERR_NDADDERR) != 0) 672290229Scem return (EIO); 673290229Scem return (EIO); 674290229Scem} 675290229Scem 676287117Scemstatic void 677344650Smavioat_process_events(struct ioat_softc *ioat, boolean_t intr) 678287117Scem{ 679287117Scem struct ioat_descriptor *desc; 680287117Scem struct bus_dmadesc *dmadesc; 681287117Scem uint64_t comp_update, status; 682290229Scem uint32_t completed, chanerr; 683290229Scem int error; 684287117Scem 685356430Smav if (intr) { 
686356430Smav mtx_lock(&ioat->cleanup_lock); 687356430Smav } else { 688356430Smav if (!mtx_trylock(&ioat->cleanup_lock)) 689356430Smav return; 690356430Smav } 691287117Scem 692302354Scem /* 693302354Scem * Don't run while the hardware is being reset. Reset is responsible 694302354Scem * for blocking new work and draining & completing existing work, so 695302354Scem * there is nothing to do until new work is queued after reset anyway. 696302354Scem */ 697302354Scem if (ioat->resetting_cleanup) { 698302354Scem mtx_unlock(&ioat->cleanup_lock); 699302354Scem return; 700302354Scem } 701302354Scem 702287117Scem completed = 0; 703287117Scem comp_update = *ioat->comp_update; 704287117Scem status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK; 705287117Scem 706344401Smav if (status < ioat->hw_desc_bus_addr || 707344401Smav status >= ioat->hw_desc_bus_addr + (1 << ioat->ring_size_order) * 708344401Smav sizeof(struct ioat_generic_hw_descriptor)) 709344401Smav panic("Bogus completion address %jx (channel %u)", 710344401Smav (uintmax_t)status, ioat->chan_idx); 711344401Smav 712295588Scem if (status == ioat->last_seen) { 713295588Scem /* 714295588Scem * If we landed in process_events and nothing has been 715295588Scem * completed, check for a timeout due to channel halt. 
716295588Scem */ 717289909Scem goto out; 718295588Scem } 719344401Smav CTR4(KTR_IOAT, "%s channel=%u hw_status=0x%lx last_seen=0x%lx", 720344401Smav __func__, ioat->chan_idx, comp_update, ioat->last_seen); 721287117Scem 722344401Smav while (RING_PHYS_ADDR(ioat, ioat->tail - 1) != status) { 723287117Scem desc = ioat_get_ring_entry(ioat, ioat->tail); 724287117Scem dmadesc = &desc->bus_dmadesc; 725344401Smav CTR5(KTR_IOAT, "channel=%u completing desc idx %u (%p) ok cb %p(%p)", 726344401Smav ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn, 727344401Smav dmadesc->callback_arg); 728287117Scem 729290229Scem if (dmadesc->callback_fn != NULL) 730290229Scem dmadesc->callback_fn(dmadesc->callback_arg, 0); 731287117Scem 732289907Scem completed++; 733287117Scem ioat->tail++; 734287117Scem } 735344401Smav CTR5(KTR_IOAT, "%s channel=%u head=%u tail=%u active=%u", __func__, 736344401Smav ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat)); 737287117Scem 738344401Smav if (completed != 0) { 739344401Smav ioat->last_seen = RING_PHYS_ADDR(ioat, ioat->tail - 1); 740344401Smav ioat->stats.descriptors_processed += completed; 741344650Smav wakeup(&ioat->tail); 742344401Smav } 743292226Scem 744289909Scemout: 745287117Scem ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN); 746287117Scem mtx_unlock(&ioat->cleanup_lock); 747289907Scem 748344401Smav /* 749344401Smav * The device doesn't seem to reliably push suspend/halt statuses to 750344401Smav * the channel completion memory address, so poll the device register 751344650Smav * here. For performance reasons skip it on interrupts, do it only 752344650Smav * on much more rare polling events. 
753344401Smav */ 754344650Smav if (!intr) 755344650Smav comp_update = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS; 756295604Scem if (!is_ioat_halted(comp_update) && !is_ioat_suspended(comp_update)) 757290229Scem return; 758290229Scem 759292226Scem ioat->stats.channel_halts++; 760292226Scem 761290229Scem /* 762290229Scem * Fatal programming error on this DMA channel. Flush any outstanding 763290229Scem * work with error status and restart the engine. 764290229Scem */ 765290229Scem mtx_lock(&ioat->submit_lock); 766290229Scem ioat->quiescing = TRUE; 767344650Smav mtx_unlock(&ioat->submit_lock); 768344650Smav 769344401Smav /* 770344650Smav * This is safe to do here because the submit queue is quiesced. We 771344650Smav * know that we will drain all outstanding events, so ioat_reset_hw 772344650Smav * can't deadlock. It is necessary to protect other ioat_process_event 773344650Smav * threads from racing ioat_reset_hw, reading an indeterminate hw 774344650Smav * state, and attempting to continue issuing completions. 
775344401Smav */ 776344650Smav mtx_lock(&ioat->cleanup_lock); 777344401Smav ioat->resetting_cleanup = TRUE; 778290229Scem 779290229Scem chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); 780344401Smav if (1 <= g_ioat_debug_level) 781344401Smav ioat_halted_debug(ioat, chanerr); 782292226Scem ioat->stats.last_halt_chanerr = chanerr; 783290229Scem 784290229Scem while (ioat_get_active(ioat) > 0) { 785290229Scem desc = ioat_get_ring_entry(ioat, ioat->tail); 786290229Scem dmadesc = &desc->bus_dmadesc; 787344401Smav CTR5(KTR_IOAT, "channel=%u completing desc idx %u (%p) err cb %p(%p)", 788344401Smav ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn, 789344401Smav dmadesc->callback_arg); 790290229Scem 791290229Scem if (dmadesc->callback_fn != NULL) 792290229Scem dmadesc->callback_fn(dmadesc->callback_arg, 793290229Scem chanerr_to_errno(chanerr)); 794290229Scem 795290229Scem ioat->tail++; 796292226Scem ioat->stats.descriptors_processed++; 797292226Scem ioat->stats.descriptors_error++; 798290229Scem } 799344401Smav CTR5(KTR_IOAT, "%s channel=%u head=%u tail=%u active=%u", __func__, 800344401Smav ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat)); 801290229Scem 802290229Scem /* Clear error status */ 803290229Scem ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr); 804290229Scem 805290229Scem mtx_unlock(&ioat->cleanup_lock); 806290229Scem 807290229Scem ioat_log_message(0, "Resetting channel to recover from error\n"); 808295605Scem error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task); 809295605Scem KASSERT(error == 0, 810295605Scem ("%s: taskqueue_enqueue failed: %d", __func__, error)); 811295605Scem} 812295605Scem 813295605Scemstatic void 814295605Scemioat_reset_hw_task(void *ctx, int pending __unused) 815295605Scem{ 816295605Scem struct ioat_softc *ioat; 817295605Scem int error; 818295605Scem 819295605Scem ioat = ctx; 820295605Scem ioat_log_message(1, "%s: Resetting channel\n", __func__); 821295605Scem 822290229Scem error = ioat_reset_hw(ioat); 
	KASSERT(error == 0, ("%s: reset failed: %d", __func__, error));
	/* Keep 'error' referenced when KASSERT compiles away (no INVARIANTS). */
	(void)error;
}

/*
 * User API functions
 */

/* Return the number of IOAT channels registered with the driver. */
unsigned
ioat_get_nchannels(void)
{

	return (ioat_channel_index);
}

/*
 * Look up channel 'index' and return a referenced dmaengine handle.
 * Returns NULL if the index is out of range, the channel slot is empty,
 * the channel is being destroyed, or (with M_NOWAIT) the channel is
 * quiescing.  With M_WAITOK, sleeps until the channel leaves the
 * quiescing state.  The reference is dropped via ioat_put_dmaengine().
 */
bus_dmaengine_t
ioat_get_dmaengine(uint32_t index, int flags)
{
	struct ioat_softc *ioat;

	KASSERT((flags & ~(M_NOWAIT | M_WAITOK)) == 0,
	    ("invalid flags: 0x%08x", flags));
	KASSERT((flags & (M_NOWAIT | M_WAITOK)) != (M_NOWAIT | M_WAITOK),
	    ("invalid wait | nowait"));

	/*
	 * Take submit_lock before dropping the list lock so the softc cannot
	 * disappear between the lookup and taking our reference.
	 */
	mtx_lock(&ioat_list_mtx);
	if (index >= ioat_channel_index ||
	    (ioat = ioat_channel[index]) == NULL) {
		mtx_unlock(&ioat_list_mtx);
		return (NULL);
	}
	mtx_lock(&ioat->submit_lock);
	mtx_unlock(&ioat_list_mtx);

	if (ioat->destroying) {
		mtx_unlock(&ioat->submit_lock);
		return (NULL);
	}

	ioat_get(ioat);
	if (ioat->quiescing) {
		if ((flags & M_NOWAIT) != 0) {
			ioat_put(ioat);
			mtx_unlock(&ioat->submit_lock);
			return (NULL);
		}

		/* Sleep until quiesce ends or the channel starts dying. */
		while (ioat->quiescing && !ioat->destroying)
			msleep(&ioat->quiescing, &ioat->submit_lock, 0, "getdma", 0);

		if (ioat->destroying) {
			ioat_put(ioat);
			mtx_unlock(&ioat->submit_lock);
			return (NULL);
		}
	}
	mtx_unlock(&ioat->submit_lock);
	return (&ioat->dmaengine);
}

/* Drop a reference obtained from ioat_get_dmaengine(). */
void
ioat_put_dmaengine(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	ioat_put(ioat);
	mtx_unlock(&ioat->submit_lock);
}

/* Return the cached hardware version of this channel. */
int
ioat_get_hwversion(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->version);
}

/* Return the maximum transfer size supported by this channel, in bytes. */
size_t
ioat_get_max_io_size(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->max_xfer_size);
}

/* Return the cached IOAT_DMACAP_* capability bits for this channel. */
uint32_t
ioat_get_capabilities(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->capabilities);
}

/* Report the bus (NUMA) domain of the underlying device. */
int
ioat_get_domain(bus_dmaengine_t dmaengine, int *domain)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (bus_get_domain(ioat->device, domain));
}

/*
 * Program the interrupt coalescing delay register.  Returns ENODEV when the
 * hardware lacks the feature and ERANGE when 'delay' exceeds the supported
 * maximum.  The value read back from the register is cached so later code
 * does not need another MMIO read.
 */
int
ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	if (!ioat->intrdelay_supported)
		return (ENODEV);
	if (delay > ioat->intrdelay_max)
		return (ERANGE);

	ioat_write_2(ioat, IOAT_INTRDELAY_OFFSET, delay);
	ioat->cached_intrdelay =
	    ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_US_MASK;
	return (0);
}
/* Return the maximum supported interrupt coalescing period. */
uint16_t
ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->intrdelay_max);
}

/*
 * Enter the submission critical section: take submit_lock and remember the
 * current ring head so ioat_release() can tell whether new work was queued.
 */
void
ioat_acquire(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
	ioat->acq_head = ioat->head;
}

/*
 * Acquire the channel and reserve 'n' descriptor slots in one step.  On
 * failure the channel is released again; returns 0 or an errno from
 * ioat_reserve_space().
 */
int
ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags)
{
	struct ioat_softc *ioat;
	int error;

	ioat = to_ioat_softc(dmaengine);
	ioat_acquire(dmaengine);

	error = ioat_reserve_space(ioat, n, mflags);
	if (error != 0)
		ioat_release(dmaengine);
	return (error);
}

/*
 * Leave the submission critical section.  If descriptors were queued since
 * ioat_acquire(), ring the DMACOUNT doorbell and make sure the poll timer
 * is running so completions get reaped.
 */
void
ioat_release(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR3(KTR_IOAT, "%s channel=%u dispatch1 head=%u", __func__,
	    ioat->chan_idx, ioat->head);
	KFAIL_POINT_CODE(DEBUG_FP, ioat_release, /* do nothing */);
	CTR3(KTR_IOAT, "%s channel=%u dispatch2 head=%u", __func__,
	    ioat->chan_idx, ioat->head);

	if (ioat->acq_head != ioat->head) {
		/* Hardware only sees the low 16 bits of the count. */
		ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET,
		    (uint16_t)ioat->head);

		if (!callout_pending(&ioat->poll_timer)) {
			callout_reset(&ioat->poll_timer, 1,
			    ioat_poll_timer_callback, ioat);
		}
	}
	mtx_unlock(&ioat->submit_lock);
}

/*
 * Common setup for every operation type: reserves one ring slot (sleeping
 * unless DMA_NO_WAIT is set) and fills in the generic control fields of the
 * hardware descriptor at the ring head.  Returns NULL if no slot could be
 * reserved.  Caller holds submit_lock and fills in the op-specific fields
 * before submitting.
 */
static struct ioat_descriptor *
ioat_op_generic(struct ioat_softc *ioat, uint8_t op,
    uint32_t size, uint64_t src, uint64_t dst,
    bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_generic_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	int mflags;

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	KASSERT((flags & ~_DMA_GENERIC_FLAGS) == 0,
	    ("Unrecognized flag(s): %#x", flags & ~_DMA_GENERIC_FLAGS));
	KASSERT(size <= ioat->max_xfer_size, ("%s: size too big (%u > %u)",
	    __func__, (unsigned)size, ioat->max_xfer_size));

	if ((flags & DMA_NO_WAIT) != 0)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	if (ioat_reserve_space(ioat, 1, mflags) != 0)
		return (NULL);

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = &ioat_get_descriptor(ioat, ioat->head)->generic;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control_generic.op = op;
	hw_desc->u.control_generic.completion_update = 1;

	if ((flags & DMA_INT_EN) != 0)
		hw_desc->u.control_generic.int_enable = 1;
	if ((flags & DMA_FENCE) != 0)
		hw_desc->u.control_generic.fence = 1;

	hw_desc->size = size;
	hw_desc->src_addr = src;
	hw_desc->dest_addr = dst;

	desc->bus_dmadesc.callback_fn = callback_fn;
	desc->bus_dmadesc.callback_arg = callback_arg;
	return (desc);
}
/*
 * Queue a no-op descriptor (an 8-byte null COPY).  Useful for generating a
 * completion/interrupt without moving data.
 */
struct bus_dmadesc *
ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
	hw_desc->u.control.null = 1;
	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

/*
 * Queue a plain memory-to-memory copy of 'len' bytes.  Addresses must fit
 * in 48 bits (asserted below).  Returns NULL if no ring slot is available.
 */
struct bus_dmadesc *
ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
    bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);

	KASSERT(((src | dst) & (0xffffull << 48)) == 0,
	    ("%s: high 16 bits of src/dst are not zero", __func__));

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	CTR6(KTR_IOAT, "%s channel=%u desc=%p dest=%lx src=%lx len=%lx",
	    __func__, ioat->chan_idx, &desc->bus_dmadesc, dst, src, len);
	return (&desc->bus_dmadesc);
}

/*
 * Queue a copy of two pages that need not be contiguous.  All four
 * addresses must be page-aligned and 48-bit.  When the second page is not
 * adjacent to the first, the descriptor's page-break fields carry the
 * second address.
 */
struct bus_dmadesc *
ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1,
    bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	KASSERT(((src1 | src2 | dst1 | dst2) & (0xffffull << 48)) == 0,
	    ("%s: high 16 bits of src/dst are not zero", __func__));
	KASSERT(((src1 | src2 | dst1 | dst2) & PAGE_MASK) == 0,
	    ("%s: addresses are not page-aligned", __func__));

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, src1, dst1,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
	if (src2 != src1 + PAGE_SIZE) {
		hw_desc->u.control.src_page_break = 1;
		hw_desc->next_src_addr = src2;
	}
	if (dst2 != dst1 + PAGE_SIZE) {
		hw_desc->u.control.dest_page_break = 1;
		hw_desc->next_dest_addr = dst2;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

/*
 * Queue a copy combined with a CRC32 over the data (MOVECRC).  The CRC is
 * either stored/tested at 'crcptr' or kept inline in the data stream
 * (DMA_CRC_INLINE).  Requires the MOVECRC capability; see the asserts for
 * the valid flag combinations.
 */
struct bus_dmadesc *
ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src,
    bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	KASSERT((ioat->capabilities & IOAT_DMACAP_MOVECRC) != 0,
	    ("%s: device lacks MOVECRC capability", __func__));
	KASSERT(((src | dst) & (0xffffffull << 40)) == 0,
	    ("%s: high 24 bits of src/dst are not zero", __func__));
	teststore = (flags & _DMA_CRC_TESTSTORE);
	KASSERT(teststore != _DMA_CRC_TESTSTORE,
	    ("%s: TEST and STORE invalid", __func__));
	KASSERT(teststore != 0 || (flags & DMA_CRC_INLINE) == 0,
	    ("%s: INLINE invalid without TEST or STORE", __func__));

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_MOVECRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_MOVECRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_MOVECRC;
		break;
	}

	KASSERT((flags & DMA_CRC_INLINE) != 0 ||
	    (crcptr & (0xffffffull << 40)) == 0,
	    ("%s: high 24 bits of crcptr are not zero", __func__));

	desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32;

	if ((flags & DMA_CRC_INLINE) == 0)
		hw_desc->crc_address = crcptr;
	else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

/*
 * Queue a CRC32 computation over 'len' bytes at 'src' without moving data.
 * Same flag handling as ioat_copy_crc(), but uses the CRC (not MOVECRC)
 * opcodes and capability bit; destination is unused.
 */
struct bus_dmadesc *
ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len,
    uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	KASSERT((ioat->capabilities & IOAT_DMACAP_CRC) != 0,
	    ("%s: device lacks CRC capability", __func__));
	KASSERT((src & (0xffffffull << 40)) == 0,
	    ("%s: high 24 bits of src are not zero", __func__));
	teststore = (flags & _DMA_CRC_TESTSTORE);
	KASSERT(teststore != _DMA_CRC_TESTSTORE,
	    ("%s: TEST and STORE invalid", __func__));
	KASSERT(teststore != 0 || (flags & DMA_CRC_INLINE) == 0,
	    ("%s: INLINE invalid without TEST or STORE", __func__));

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_CRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_CRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_CRC;
		break;
	}

	KASSERT((flags & DMA_CRC_INLINE) != 0 ||
	    (crcptr & (0xffffffull << 40)) == 0,
	    ("%s: high 24 bits of crcptr are not zero", __func__));

	desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32;

	if ((flags & DMA_CRC_INLINE) == 0)
		hw_desc->crc_address = crcptr;
	else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

/*
 * Queue a block fill: write the 8-byte 'fillpattern' across 'len' bytes at
 * 'dst'.  Requires the BFILL capability.
 */
struct bus_dmadesc *
ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern,
    bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_fill_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	KASSERT((ioat->capabilities & IOAT_DMACAP_BFILL) != 0,
	    ("%s: device lacks BFILL capability", __func__));
KASSERT((dst & (0xffffull << 48)) == 0, 1286353581Smav ("%s: high 16 bits of crcptr are not zero", __func__)); 1287290087Scem 1288290021Scem desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, fillpattern, dst, 1289290021Scem callback_fn, callback_arg, flags); 1290290021Scem if (desc == NULL) 1291290021Scem return (NULL); 1292290021Scem 1293344401Smav hw_desc = &ioat_get_descriptor(ioat, desc->id)->fill; 1294290021Scem if (g_ioat_debug_level >= 3) 1295290021Scem dump_descriptor(hw_desc); 1296290021Scem 1297290021Scem ioat_submit_single(ioat); 1298290021Scem return (&desc->bus_dmadesc); 1299290021Scem} 1300290021Scem 1301287117Scem/* 1302287117Scem * Ring Management 1303287117Scem */ 1304287117Scemstatic inline uint32_t 1305287117Scemioat_get_active(struct ioat_softc *ioat) 1306287117Scem{ 1307287117Scem 1308287117Scem return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1)); 1309287117Scem} 1310287117Scem 1311287117Scemstatic inline uint32_t 1312287117Scemioat_get_ring_space(struct ioat_softc *ioat) 1313287117Scem{ 1314287117Scem 1315287117Scem return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1); 1316287117Scem} 1317287117Scem 1318289982Scem/* 1319289982Scem * Reserves space in this IOAT descriptor ring by ensuring enough slots remain 1320289982Scem * for 'num_descs'. 1321289982Scem * 1322289982Scem * If mflags contains M_WAITOK, blocks until enough space is available. 1323289982Scem * 1324289982Scem * Returns zero on success, or an errno on error. If num_descs is beyond the 1325289982Scem * maximum ring size, returns EINVAl; if allocation would block and mflags 1326289982Scem * contains M_NOWAIT, returns EAGAIN. 1327289982Scem * 1328289982Scem * Must be called with the submit_lock held; returns with the lock held. The 1329289982Scem * lock may be dropped to allocate the ring. 1330289982Scem * 1331289982Scem * (The submit_lock is needed to add any entries to the ring, so callers are 1332289982Scem * assured enough room is available.) 
1333289982Scem */ 1334287117Scemstatic int 1335289982Scemioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags) 1336287117Scem{ 1337344401Smav boolean_t dug; 1338289982Scem int error; 1339287117Scem 1340289982Scem mtx_assert(&ioat->submit_lock, MA_OWNED); 1341289982Scem error = 0; 1342344401Smav dug = FALSE; 1343289982Scem 1344344401Smav if (num_descs < 1 || num_descs >= (1 << ioat->ring_size_order)) { 1345289982Scem error = EINVAL; 1346289982Scem goto out; 1347289982Scem } 1348289982Scem 1349289982Scem for (;;) { 1350344401Smav if (ioat->quiescing) { 1351344401Smav error = ENXIO; 1352344401Smav goto out; 1353344401Smav } 1354344401Smav 1355287117Scem if (ioat_get_ring_space(ioat) >= num_descs) 1356289982Scem goto out; 1357287117Scem 1358344401Smav CTR3(KTR_IOAT, "%s channel=%u starved (%u)", __func__, 1359344401Smav ioat->chan_idx, num_descs); 1360287117Scem 1361344401Smav if (!dug && !ioat->is_submitter_processing) { 1362344401Smav ioat->is_submitter_processing = TRUE; 1363289982Scem mtx_unlock(&ioat->submit_lock); 1364289982Scem 1365344401Smav CTR2(KTR_IOAT, "%s channel=%u attempting to process events", 1366344401Smav __func__, ioat->chan_idx); 1367344650Smav ioat_process_events(ioat, FALSE); 1368289982Scem 1369289982Scem mtx_lock(&ioat->submit_lock); 1370344401Smav dug = TRUE; 1371344401Smav KASSERT(ioat->is_submitter_processing == TRUE, 1372344401Smav ("is_submitter_processing")); 1373344401Smav ioat->is_submitter_processing = FALSE; 1374344401Smav wakeup(&ioat->tail); 1375344401Smav continue; 1376344401Smav } 1377289982Scem 1378344401Smav if ((mflags & M_WAITOK) == 0) { 1379344401Smav error = EAGAIN; 1380344401Smav break; 1381289982Scem } 1382344401Smav CTR2(KTR_IOAT, "%s channel=%u blocking on completions", 1383344401Smav __func__, ioat->chan_idx); 1384344401Smav msleep(&ioat->tail, &ioat->submit_lock, 0, 1385344401Smav "ioat_full", 0); 1386344401Smav continue; 1387287117Scem } 1388289982Scem 1389289982Scemout: 1390289982Scem 
	mtx_assert(&ioat->submit_lock, MA_OWNED);
	KASSERT(!ioat->quiescing || error == ENXIO,
	    ("reserved during quiesce"));
	return (error);
}

/* Free the software descriptor ring; 'size' and 'ioat' are unused because
 * the ring is a single malloc(9) allocation. */
static void
ioat_free_ring(struct ioat_softc *ioat, uint32_t size,
    struct ioat_descriptor *ring)
{

	free(ring, M_IOAT);
}

/* Map a monotonically increasing ring index onto the software ring. */
static struct ioat_descriptor *
ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
{

	return (&ioat->ring[index % (1 << ioat->ring_size_order)]);
}

/* Map a monotonically increasing ring index onto the hardware ring. */
static union ioat_hw_descriptor *
ioat_get_descriptor(struct ioat_softc *ioat, uint32_t index)
{

	return (&ioat->hw_desc_ring[index % (1 << ioat->ring_size_order)]);
}

/*
 * Log a channel-halt error and, if chanerr is non-zero, dump the two
 * descriptors at the current tail for post-mortem analysis.  Requires the
 * cleanup_lock so the tail is stable.
 */
static void
ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr)
{
	union ioat_hw_descriptor *desc;

	ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr,
	    IOAT_CHANERR_STR);
	if (chanerr == 0)
		return;

	mtx_assert(&ioat->cleanup_lock, MA_OWNED);

	desc = ioat_get_descriptor(ioat, ioat->tail + 0);
	dump_descriptor(desc);

	desc = ioat_get_descriptor(ioat, ioat->tail + 1);
	dump_descriptor(desc);
}

/*
 * Periodic poll: reap completions and re-arm the callout while work is
 * still outstanding.
 */
static void
ioat_poll_timer_callback(void *arg)
{
	struct ioat_softc *ioat;

	ioat = arg;
	CTR1(KTR_IOAT, "%s", __func__);

	ioat_process_events(ioat, FALSE);

	mtx_lock(&ioat->submit_lock);
	if (ioat_get_active(ioat) > 0)
		callout_schedule(&ioat->poll_timer, 1);
	mtx_unlock(&ioat->submit_lock);
}

/*
 * Support Functions
 */

/* Advance the ring head by one; the doorbell write happens in
 * ioat_release(). */
static void
ioat_submit_single(struct ioat_softc *ioat)
{

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	ioat->head++;
	CTR4(KTR_IOAT, "%s channel=%u head=%u tail=%u", __func__,
	    ioat->chan_idx, ioat->head, ioat->tail);

	ioat->stats.descriptors_submitted++;
}

/*
 * Quiesce, drain, and fully reset the hardware channel, then bring it back
 * online with an empty ring.  Only one reset runs at a time; concurrent
 * callers sleep on 'resetting'.  Returns 0 on success, ENXIO if the channel
 * is being destroyed, or ETIMEDOUT/EIO on hardware failure.
 */
static int
ioat_reset_hw(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	unsigned timeout;
	int error;

	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	mtx_lock(&ioat->submit_lock);
	/* Serialize against any reset already in progress. */
	while (ioat->resetting && !ioat->destroying)
		msleep(&ioat->resetting, &ioat->submit_lock, 0, "IRH_drain", 0);
	if (ioat->destroying) {
		mtx_unlock(&ioat->submit_lock);
		return (ENXIO);
	}
	ioat->resetting = TRUE;
	ioat->quiescing = TRUE;
	mtx_unlock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	/* Wait for all outstanding descriptors to complete. */
	while (ioat_get_active(ioat) > 0)
		msleep(&ioat->tail, &ioat->cleanup_lock, 0, "ioat_drain", 1);

	/*
	 * Suspend ioat_process_events while the hardware and softc are in an
	 * indeterminate state.
	 */
	ioat->resetting_cleanup = TRUE;
	mtx_unlock(&ioat->cleanup_lock);

	CTR2(KTR_IOAT, "%s channel=%u quiesced and drained", __func__,
	    ioat->chan_idx);

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
	    timeout < 20; timeout++) {
		DELAY(1000);
		status = ioat_get_chansts(ioat);
	}
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce"));

	/* Clear any latched channel errors before the reset. */
	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	CTR2(KTR_IOAT, "%s channel=%u hardware suspended", __func__,
	    ioat->chan_idx);

	/*
	 * IOAT v3 workaround - CHANERRMSK_INT with 3E07h to masks out errors
	 * that can cause stability issues for IOAT v3.
	 */
	pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
	    4);
	chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
	pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);

	/*
	 * BDXDE and BWD models reset MSI-X registers on device reset.
	 * Save/restore their contents manually.
	 */
	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets MSI-X registers; saving\n");
		pci_save_state(ioat->device);
	}

	ioat_reset(ioat);
	CTR2(KTR_IOAT, "%s channel=%u hardware reset", __func__,
	    ioat->chan_idx);

	/* Wait at most 20 ms */
	for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
		DELAY(1000);
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets registers; restored\n");
		pci_restore_state(ioat->device);
	}

	/* Reset attempts to return the hardware to "halted." */
	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		/* So this really shouldn't happen... */
		ioat_log_message(0, "Device is active after a reset?\n");
		ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
		error = 0;
		goto out;
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	if (chanerr != 0) {
		mtx_lock(&ioat->cleanup_lock);
		ioat_halted_debug(ioat, chanerr);
		mtx_unlock(&ioat->cleanup_lock);
		error = EIO;
		goto out;
	}

	/*
	 * Bring device back online after reset.  Writing CHAINADDR brings the
	 * device back to active.
	 *
	 * The internal ring counter resets to zero, so we have to start over
	 * at zero as well.
	 */
	ioat->tail = ioat->head = 0;
	/* Prime the completion record to "one before the first slot". */
	*ioat->comp_update = ioat->last_seen =
	    RING_PHYS_ADDR(ioat, ioat->tail - 1);

	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
	ioat_write_chainaddr(ioat, RING_PHYS_ADDR(ioat, 0));
	error = 0;
	CTR2(KTR_IOAT, "%s channel=%u configured channel", __func__,
	    ioat->chan_idx);

out:
	/* Enqueues a null operation and ensures it completes. */
	if (error == 0) {
		error = ioat_start_channel(ioat);
		CTR2(KTR_IOAT, "%s channel=%u started channel", __func__,
		    ioat->chan_idx);
	}

	/*
	 * Resume completions now that ring state is consistent.
	 */
	mtx_lock(&ioat->cleanup_lock);
	ioat->resetting_cleanup = FALSE;
	mtx_unlock(&ioat->cleanup_lock);

	/* Unblock submission of new work */
	mtx_lock(&ioat->submit_lock);
	ioat->quiescing = FALSE;
	wakeup(&ioat->quiescing);

	ioat->resetting = FALSE;
	wakeup(&ioat->resetting);

	CTR2(KTR_IOAT, "%s channel=%u reset done", __func__, ioat->chan_idx);
	mtx_unlock(&ioat->submit_lock);

	return (error);
}

/*
 * Read-only sysctl: render the channel status register as a human-readable
 * string (ACTIVE/IDLE/SUSPENDED/HALTED/ARMED/UNKNOWN).
 */
static int
sysctl_handle_chansts(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	struct sbuf sb;
	uint64_t status;
	int error;

	ioat = arg1;

	status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;

	sbuf_new_for_sysctl(&sb, NULL, 256, req);
	switch (status) {
	case IOAT_CHANSTS_ACTIVE:
		sbuf_printf(&sb, "ACTIVE");
		break;
	case IOAT_CHANSTS_IDLE:
		sbuf_printf(&sb, "IDLE");
		break;
	case IOAT_CHANSTS_SUSPENDED:
		sbuf_printf(&sb, "SUSPENDED");
		break;
	case IOAT_CHANSTS_HALTED:
		sbuf_printf(&sb, "HALTED");
		break;
	case IOAT_CHANSTS_ARMED:
		sbuf_printf(&sb, "ARMED");
		break;
	default:
		sbuf_printf(&sb, "UNKNOWN");
		break;
	}
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	/* This node is read-only; reject any attempted write. */
	if (error != 0 || req->newptr == NULL)
		return (error);
	return (EINVAL);
}

/*
 * Read-only sysctl: descriptors processed per interrupt, printed with one
 * decimal digit; "NaN" before the first interrupt.
 */
static int
sysctl_handle_dpi(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	struct sbuf sb;
#define	PRECISION	"1"
	const uintmax_t factor = 10;
	uintmax_t rate;
	int error;

	ioat = arg1;
	sbuf_new_for_sysctl(&sb, NULL, 16, req);

	if (ioat->stats.interrupts == 0) {
		sbuf_printf(&sb, "NaN");
		goto out;
	}
	rate = ioat->stats.descriptors_processed * factor /
	    ioat->stats.interrupts;
	sbuf_printf(&sb, "%ju.%." PRECISION "ju", rate / factor,
	    rate % factor);
#undef	PRECISION
out:
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	/* Read-only node: reject writes. */
	if (error != 0 || req->newptr == NULL)
		return (error);
	return (EINVAL);
}

/*
 * Write-only trigger sysctl: writing any non-zero value performs a full
 * channel reset via ioat_reset_hw(); always reads back as 0.
 */
static int
sysctl_handle_reset(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	int error, arg;

	ioat = arg1;

	arg = 0;
	error = SYSCTL_OUT(req, &arg, sizeof(arg));
	if (error != 0 || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &arg, sizeof(arg));
	if (error != 0)
		return (error);

	if (arg != 0)
		error = ioat_reset_hw(ioat);

	return (error);
}

/* Print a 64-byte hardware descriptor as two rows of eight 32-bit words. */
static void
dump_descriptor(void *hw_desc)
{
	int i, j;

	for (i = 0; i < 2; i++) {
		for (j = 0; j < 8; j++)
			printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]);
		printf("\n");
	}
}

/* Attach the per-device sysctl tree (statistics, state, debug knobs). */
static void
ioat_setup_sysctl(device_t device)
{
	struct sysctl_oid_list *par, *statpar, *state, *hammer;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree, *tmp;
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);
	ctx = device_get_sysctl_ctx(device);
	tree = device_get_sysctl_tree(device);
	par = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD,
	    &ioat->version, 0,
"HW version (0xMM form)"); 1748289980Scem SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD, 1749289980Scem &ioat->max_xfer_size, 0, "HW maximum transfer size"); 1750292228Scem SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD, 1751292228Scem &ioat->intrdelay_supported, 0, "Is INTRDELAY supported"); 1752292228Scem SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD, 1753292228Scem &ioat->intrdelay_max, 0, 1754292228Scem "Maximum configurable INTRDELAY on this channel (microseconds)"); 1755289980Scem 1756292226Scem tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL, 1757292226Scem "IOAT channel internal state"); 1758292226Scem state = SYSCTL_CHILDREN(tmp); 1759292226Scem 1760292226Scem SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD, 1761289982Scem &ioat->ring_size_order, 0, "SW descriptor ring size order"); 1762292226Scem SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head, 1763292226Scem 0, "SW descriptor head pointer index"); 1764292226Scem SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail, 1765292226Scem 0, "SW descriptor tail pointer index"); 1766289908Scem 1767292226Scem SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD, 1768289980Scem ioat->comp_update, "HW addr of last completion"); 1769289980Scem 1770344401Smav SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_submitter_processing", 1771344401Smav CTLFLAG_RD, &ioat->is_submitter_processing, 0, 1772344401Smav "submitter processing"); 1773289980Scem 1774292226Scem SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts", 1775292226Scem CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A", 1776292226Scem "String of the channel status"); 1777292226Scem 1778292228Scem SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD, 1779292228Scem &ioat->cached_intrdelay, 0, 1780292228Scem "Current INTRDELAY on this channel (cached, microseconds)"); 1781292228Scem 1782292226Scem tmp = 
SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer", CTLFLAG_RD, NULL, 1783292226Scem "Big hammers (mostly for testing)"); 1784292226Scem hammer = SYSCTL_CHILDREN(tmp); 1785292226Scem 1786292226Scem SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset", 1787289908Scem CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I", 1788289908Scem "Set to non-zero to reset the hardware"); 1789292226Scem 1790292226Scem tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL, 1791292226Scem "IOAT channel statistics"); 1792292226Scem statpar = SYSCTL_CHILDREN(tmp); 1793292226Scem 1794292226Scem SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts", CTLFLAG_RW, 1795292226Scem &ioat->stats.interrupts, 1796292226Scem "Number of interrupts processed on this channel"); 1797292226Scem SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors", CTLFLAG_RW, 1798292226Scem &ioat->stats.descriptors_processed, 1799292226Scem "Number of descriptors processed on this channel"); 1800292226Scem SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted", CTLFLAG_RW, 1801292226Scem &ioat->stats.descriptors_submitted, 1802292226Scem "Number of descriptors submitted to this channel"); 1803292226Scem SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored", CTLFLAG_RW, 1804292226Scem &ioat->stats.descriptors_error, 1805292226Scem "Number of descriptors failed by channel errors"); 1806292226Scem SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts", CTLFLAG_RW, 1807292226Scem &ioat->stats.channel_halts, 0, 1808292226Scem "Number of times the channel has halted"); 1809292226Scem SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr", CTLFLAG_RW, 1810292226Scem &ioat->stats.last_halt_chanerr, 0, 1811292226Scem "The raw CHANERR when the channel was last halted"); 1812292226Scem 1813292226Scem SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt", 1814292226Scem CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A", 1815292226Scem "Descriptors per interrupt"); 1816287117Scem} 1817289907Scem 
1818344650Smavstatic void 1819344650Smavioat_get(struct ioat_softc *ioat) 1820289907Scem{ 1821289907Scem 1822344650Smav mtx_assert(&ioat->submit_lock, MA_OWNED); 1823344650Smav KASSERT(ioat->refcnt < UINT32_MAX, ("refcnt overflow")); 1824289907Scem 1825344650Smav ioat->refcnt++; 1826289907Scem} 1827289907Scem 1828344650Smavstatic void 1829344650Smavioat_put(struct ioat_softc *ioat) 1830289907Scem{ 1831290229Scem 1832344650Smav mtx_assert(&ioat->submit_lock, MA_OWNED); 1833344650Smav KASSERT(ioat->refcnt >= 1, ("refcnt error")); 1834290229Scem 1835344650Smav if (--ioat->refcnt == 0) 1836344650Smav wakeup(&ioat->refcnt); 1837290229Scem} 1838290229Scem 1839289907Scemstatic void 1840290131Scemioat_drain_locked(struct ioat_softc *ioat) 1841289907Scem{ 1842289907Scem 1843344650Smav mtx_assert(&ioat->submit_lock, MA_OWNED); 1844344650Smav 1845289907Scem while (ioat->refcnt > 0) 1846344650Smav msleep(&ioat->refcnt, &ioat->submit_lock, 0, "ioat_drain", 0); 1847289907Scem} 1848301712Scem 1849301712Scem#ifdef DDB 1850301712Scem#define _db_show_lock(lo) LOCK_CLASS(lo)->lc_ddb_show(lo) 1851301712Scem#define db_show_lock(lk) _db_show_lock(&(lk)->lock_object) 1852301712ScemDB_SHOW_COMMAND(ioat, db_show_ioat) 1853301712Scem{ 1854301712Scem struct ioat_softc *sc; 1855301712Scem unsigned idx; 1856301712Scem 1857301712Scem if (!have_addr) 1858301712Scem goto usage; 1859301712Scem idx = (unsigned)addr; 1860344401Smav if (idx >= ioat_channel_index) 1861301712Scem goto usage; 1862301712Scem 1863301712Scem sc = ioat_channel[idx]; 1864301712Scem db_printf("ioat softc at %p\n", sc); 1865301712Scem if (sc == NULL) 1866301712Scem return; 1867301712Scem 1868301712Scem db_printf(" version: %d\n", sc->version); 1869301712Scem db_printf(" chan_idx: %u\n", sc->chan_idx); 1870301712Scem db_printf(" submit_lock: "); 1871301712Scem db_show_lock(&sc->submit_lock); 1872301712Scem 1873301712Scem db_printf(" capabilities: %b\n", (int)sc->capabilities, 1874301712Scem IOAT_DMACAP_STR); 1875301712Scem 
db_printf(" cached_intrdelay: %u\n", sc->cached_intrdelay); 1876301712Scem db_printf(" *comp_update: 0x%jx\n", (uintmax_t)*sc->comp_update); 1877301712Scem 1878302352Scem db_printf(" poll_timer:\n"); 1879302352Scem db_printf(" c_time: %ju\n", (uintmax_t)sc->poll_timer.c_time); 1880302352Scem db_printf(" c_arg: %p\n", sc->poll_timer.c_arg); 1881302352Scem db_printf(" c_func: %p\n", sc->poll_timer.c_func); 1882302352Scem db_printf(" c_lock: %p\n", sc->poll_timer.c_lock); 1883302352Scem db_printf(" c_flags: 0x%x\n", (unsigned)sc->poll_timer.c_flags); 1884301712Scem 1885301712Scem db_printf(" quiescing: %d\n", (int)sc->quiescing); 1886301712Scem db_printf(" destroying: %d\n", (int)sc->destroying); 1887344401Smav db_printf(" is_submitter_processing: %d\n", 1888344401Smav (int)sc->is_submitter_processing); 1889301712Scem db_printf(" intrdelay_supported: %d\n", (int)sc->intrdelay_supported); 1890302353Scem db_printf(" resetting: %d\n", (int)sc->resetting); 1891301712Scem 1892301712Scem db_printf(" head: %u\n", sc->head); 1893301712Scem db_printf(" tail: %u\n", sc->tail); 1894301712Scem db_printf(" ring_size_order: %u\n", sc->ring_size_order); 1895301712Scem db_printf(" last_seen: 0x%lx\n", sc->last_seen); 1896301712Scem db_printf(" ring: %p\n", sc->ring); 1897344401Smav db_printf(" descriptors: %p\n", sc->hw_desc_ring); 1898344401Smav db_printf(" descriptors (phys): 0x%jx\n", 1899344401Smav (uintmax_t)sc->hw_desc_bus_addr); 1900301712Scem 1901344401Smav db_printf(" ring[%u] (tail):\n", sc->tail % 1902344401Smav (1 << sc->ring_size_order)); 1903344401Smav db_printf(" id: %u\n", ioat_get_ring_entry(sc, sc->tail)->id); 1904344401Smav db_printf(" addr: 0x%lx\n", 1905344401Smav RING_PHYS_ADDR(sc, sc->tail)); 1906344401Smav db_printf(" next: 0x%lx\n", 1907344401Smav ioat_get_descriptor(sc, sc->tail)->generic.next); 1908344401Smav 1909344401Smav db_printf(" ring[%u] (head - 1):\n", (sc->head - 1) % 1910344401Smav (1 << sc->ring_size_order)); 1911344401Smav db_printf(" id: %u\n", 
ioat_get_ring_entry(sc, sc->head - 1)->id); 1912344401Smav db_printf(" addr: 0x%lx\n", 1913344401Smav RING_PHYS_ADDR(sc, sc->head - 1)); 1914344401Smav db_printf(" next: 0x%lx\n", 1915344401Smav ioat_get_descriptor(sc, sc->head - 1)->generic.next); 1916344401Smav 1917344401Smav db_printf(" ring[%u] (head):\n", (sc->head) % 1918344401Smav (1 << sc->ring_size_order)); 1919344401Smav db_printf(" id: %u\n", ioat_get_ring_entry(sc, sc->head)->id); 1920344401Smav db_printf(" addr: 0x%lx\n", 1921344401Smav RING_PHYS_ADDR(sc, sc->head)); 1922344401Smav db_printf(" next: 0x%lx\n", 1923344401Smav ioat_get_descriptor(sc, sc->head)->generic.next); 1924344401Smav 1925344401Smav for (idx = 0; idx < (1 << sc->ring_size_order); idx++) 1926344401Smav if ((*sc->comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK) 1927344401Smav == RING_PHYS_ADDR(sc, idx)) 1928344401Smav db_printf(" ring[%u] == hardware tail\n", idx); 1929344401Smav 1930301712Scem db_printf(" cleanup_lock: "); 1931301712Scem db_show_lock(&sc->cleanup_lock); 1932301712Scem 1933301712Scem db_printf(" refcnt: %u\n", sc->refcnt); 1934301712Scem db_printf(" stats:\n"); 1935301712Scem db_printf(" interrupts: %lu\n", sc->stats.interrupts); 1936301712Scem db_printf(" descriptors_processed: %lu\n", sc->stats.descriptors_processed); 1937301712Scem db_printf(" descriptors_error: %lu\n", sc->stats.descriptors_error); 1938301712Scem db_printf(" descriptors_submitted: %lu\n", sc->stats.descriptors_submitted); 1939301712Scem 1940301712Scem db_printf(" channel_halts: %u\n", sc->stats.channel_halts); 1941301712Scem db_printf(" last_halt_chanerr: %u\n", sc->stats.last_halt_chanerr); 1942301712Scem 1943301712Scem if (db_pager_quit) 1944301712Scem return; 1945301712Scem 1946301712Scem db_printf(" hw status:\n"); 1947301712Scem db_printf(" status: 0x%lx\n", ioat_get_chansts(sc)); 1948301712Scem db_printf(" chanctrl: 0x%x\n", 1949301712Scem (unsigned)ioat_read_2(sc, IOAT_CHANCTRL_OFFSET)); 1950301712Scem db_printf(" chancmd: 0x%x\n", 
1951301712Scem (unsigned)ioat_read_1(sc, IOAT_CHANCMD_OFFSET)); 1952301712Scem db_printf(" dmacount: 0x%x\n", 1953301712Scem (unsigned)ioat_read_2(sc, IOAT_DMACOUNT_OFFSET)); 1954301712Scem db_printf(" chainaddr: 0x%lx\n", 1955301712Scem ioat_read_double_4(sc, IOAT_CHAINADDR_OFFSET_LOW)); 1956301712Scem db_printf(" chancmp: 0x%lx\n", 1957301712Scem ioat_read_double_4(sc, IOAT_CHANCMP_OFFSET_LOW)); 1958301712Scem db_printf(" chanerr: %b\n", 1959301712Scem (int)ioat_read_4(sc, IOAT_CHANERR_OFFSET), IOAT_CHANERR_STR); 1960301712Scem return; 1961301712Scemusage: 1962301712Scem db_printf("usage: show ioat <0-%u>\n", ioat_channel_index); 1963301712Scem return; 1964301712Scem} 1965301712Scem#endif /* DDB */ 1966