/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/ioat/ioat.c 289905 2015-10-24 23:45:10Z cem $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#include "ioat.h"
#include "ioat_hw.h"
#include "ioat_internal.h"

#define	IOAT_INTR_TIMO	(hz / 10)

static int ioat_probe(device_t device);
static int ioat_attach(device_t device);
static int ioat_detach(device_t device);
static int ioat_setup_intr(struct ioat_softc *ioat);
static int ioat_teardown_intr(struct ioat_softc *ioat);
static int ioat3_attach(device_t device);
static int ioat3_selftest(struct ioat_softc *ioat);
static int ioat_map_pci_bar(struct ioat_softc *ioat);
static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void ioat_interrupt_handler(void *arg);
static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
static void ioat_process_events(struct ioat_softc *ioat);
static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
static void ioat_free_ring_entry(struct ioat_softc *ioat,
    struct ioat_descriptor *desc);
static struct ioat_descriptor *ioat_alloc_ring_entry(struct ioat_softc *ioat);
static int ioat_reserve_space_and_lock(struct ioat_softc *ioat, int num_descs);
static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *ioat,
    uint32_t index);
static boolean_t resize_ring(struct ioat_softc *ioat, int order);
static void ioat_timer_callback(void *arg);
static void dump_descriptor(void *hw_desc);
static void ioat_submit_single(struct ioat_softc *ioat);
static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
    int error);
static int ioat_reset_hw(struct ioat_softc *ioat);
static void ioat_setup_sysctl(device_t device);

#define	ioat_log_message(v, ...) do {					\
	if ((v) <= g_ioat_debug_level) {				\
		device_printf(ioat->device, __VA_ARGS__);		\
	}								\
} while (0)

MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node");

static int g_force_legacy_interrupts;
SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN,
    &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled");

int g_ioat_debug_level = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level,
    0, "Set log level (0-3) for ioat(4). Higher is more verbose.");

/*
 * OS <-> Driver interface structures
 */
static device_method_t ioat_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     ioat_probe),
	DEVMETHOD(device_attach,    ioat_attach),
	DEVMETHOD(device_detach,    ioat_detach),
	{ 0, 0 }
};

static driver_t ioat_pci_driver = {
	"ioat",
	ioat_pci_methods,
	sizeof(struct ioat_softc),
};

static devclass_t ioat_devclass;
DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0);

/*
 * Private data structures
 */
static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS];
static int ioat_channel_index = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0,
    "Number of IOAT channels attached");

static struct _pcsid
{
	u_int32_t   type;
	const char  *desc;
} pci_ids[] = {
	{ 0x34308086, "TBG IOAT Ch0" },
	{ 0x34318086, "TBG IOAT Ch1" },
	{ 0x34328086, "TBG IOAT Ch2" },
	{ 0x34338086, "TBG IOAT Ch3" },
	{ 0x34298086, "TBG IOAT Ch4" },
	{ 0x342a8086, "TBG IOAT Ch5" },
	{ 0x342b8086, "TBG IOAT Ch6" },
	{ 0x342c8086, "TBG IOAT Ch7" },

	{ 0x37108086, "JSF IOAT Ch0" },
	{ 0x37118086, "JSF IOAT Ch1" },
	{ 0x37128086, "JSF IOAT Ch2" },
	{ 0x37138086, "JSF IOAT Ch3" },
	{ 0x37148086, "JSF IOAT Ch4" },
	{ 0x37158086, "JSF IOAT Ch5" },
	{ 0x37168086, "JSF IOAT Ch6" },
	{ 0x37178086, "JSF IOAT Ch7" },
	{ 0x37188086, "JSF IOAT Ch0 (RAID)" },
	{ 0x37198086, "JSF IOAT Ch1 (RAID)" },

	{ 0x3c208086, "SNB IOAT Ch0" },
	{ 0x3c218086, "SNB IOAT Ch1" },
	{ 0x3c228086, "SNB IOAT Ch2" },
	{ 0x3c238086, "SNB IOAT Ch3" },
	{ 0x3c248086, "SNB IOAT Ch4" },
	{ 0x3c258086, "SNB IOAT Ch5" },
	{ 0x3c268086, "SNB IOAT Ch6" },
	{ 0x3c278086, "SNB IOAT Ch7" },
	{ 0x3c2e8086, "SNB IOAT Ch0 (RAID)" },
	{ 0x3c2f8086, "SNB IOAT Ch1 (RAID)" },

	{ 0x0e208086, "IVB IOAT Ch0" },
	{ 0x0e218086, "IVB IOAT Ch1" },
	{ 0x0e228086, "IVB IOAT Ch2" },
	{ 0x0e238086, "IVB IOAT Ch3" },
	{ 0x0e248086, "IVB IOAT Ch4" },
	{ 0x0e258086, "IVB IOAT Ch5" },
	{ 0x0e268086, "IVB IOAT Ch6" },
	{ 0x0e278086, "IVB IOAT Ch7" },
	{ 0x0e2e8086, "IVB IOAT Ch0 (RAID)" },
	{ 0x0e2f8086, "IVB IOAT Ch1 (RAID)" },

	{ 0x2f208086, "HSW IOAT Ch0" },
	{ 0x2f218086, "HSW IOAT Ch1" },
	{ 0x2f228086, "HSW IOAT Ch2" },
	{ 0x2f238086, "HSW IOAT Ch3" },
	{ 0x2f248086, "HSW IOAT Ch4" },
	{ 0x2f258086, "HSW IOAT Ch5" },
	{ 0x2f268086, "HSW IOAT Ch6" },
	{ 0x2f278086, "HSW IOAT Ch7" },
	{ 0x2f2e8086, "HSW IOAT Ch0 (RAID)" },
	{ 0x2f2f8086, "HSW IOAT Ch1 (RAID)" },

	{ 0x0c508086, "BWD IOAT Ch0" },
	{ 0x0c518086, "BWD IOAT Ch1" },
	{ 0x0c528086, "BWD IOAT Ch2" },
	{ 0x0c538086, "BWD IOAT Ch3" },

	{ 0x6f508086, "BDXDE IOAT Ch0" },
	{ 0x6f518086, "BDXDE IOAT Ch1" },
	{ 0x6f528086, "BDXDE IOAT Ch2" },
	{ 0x6f538086, "BDXDE IOAT Ch3" },

	{ 0x00000000, NULL           }
};

/*
 * OS <-> Driver linkage functions
 */
static int
ioat_probe(device_t device)
{
	struct _pcsid *ep;
	u_int32_t type;

	type = pci_get_devid(device);
	for (ep = pci_ids; ep->type; ep++) {
		if (ep->type == type) {
			device_set_desc(device, ep->desc);
			return (0);
		}
	}
	return (ENXIO);
}

static int
ioat_attach(device_t device)
{
	struct ioat_softc *ioat;
	int error;

	ioat = DEVICE2SOFTC(device);
	ioat->device = device;

	error = ioat_map_pci_bar(ioat);
	if (error != 0)
		goto err;

	ioat->version = ioat_read_cbver(ioat);
	if (ioat->version < IOAT_VER_3_0) {
		error = ENODEV;
		goto err;
	}

	error = ioat_setup_intr(ioat);
	if (error != 0)
		return (error);

	error = ioat3_attach(device);
	if (error != 0)
		goto err;

	error = pci_enable_busmaster(device);
	if (error != 0)
		goto err;

	error = ioat3_selftest(ioat);
	if (error != 0)
		return (error);

	ioat_process_events(ioat);
	ioat_setup_sysctl(device);

	ioat_channel[ioat_channel_index++] = ioat;
	ioat_test_attach();

err:
	if (error != 0)
		ioat_detach(device);
	return (error);
}

static int
ioat_detach(device_t device)
{
	struct ioat_softc *ioat;
	uint32_t i;

	ioat = DEVICE2SOFTC(device);

	ioat_test_detach();

	ioat_teardown_intr(ioat);
	callout_drain(&ioat->timer);

	pci_disable_busmaster(device);

	if (ioat->pci_resource != NULL)
		bus_release_resource(device, SYS_RES_MEMORY,
		    ioat->pci_resource_id, ioat->pci_resource);

	if (ioat->ring != NULL) {
		for (i = 0; i < (1 << ioat->ring_size_order); i++)
			ioat_free_ring_entry(ioat, ioat->ring[i]);
		free(ioat->ring, M_IOAT);
	}

	if (ioat->comp_update != NULL) {
		bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
		bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
		    ioat->comp_update_map);
		bus_dma_tag_destroy(ioat->comp_update_tag);
	}

	bus_dma_tag_destroy(ioat->hw_desc_tag);

	return (0);
}

static int
ioat_teardown_intr(struct ioat_softc *ioat)
{

	if (ioat->tag != NULL)
		bus_teardown_intr(ioat->device, ioat->res, ioat->tag);

	if (ioat->res != NULL)
		bus_release_resource(ioat->device, SYS_RES_IRQ,
		    rman_get_rid(ioat->res), ioat->res);

	pci_release_msi(ioat->device);
	return (0);
}

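/*
 * Sanity-check the freshly attached channel: queue a NULL descriptor and
 * wait briefly for the hardware to report the channel idle.
 */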
static int
ioat3_selftest(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	int i;

	ioat_acquire(&ioat->dmaengine);
	ioat_null(&ioat->dmaengine, NULL, NULL, 0);
	ioat_release(&ioat->dmaengine);

	for (i = 0; i < 100; i++) {
		DELAY(1);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status))
			return (0);
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_log_message(0, "could not start channel: "
	    "status = %#jx error = %x\n", (uintmax_t)status, chanerr);
	return (ENXIO);
}

/*
 * Initialize Hardware
 */
static int
ioat3_attach(device_t device)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor **ring;
	struct ioat_descriptor *next;
	struct ioat_dma_hw_descriptor *dma_hw_desc;
	uint32_t capabilities;
	int i, num_descriptors;
	int error;
	uint8_t xfercap;

	error = 0;
	ioat = DEVICE2SOFTC(device);
	capabilities = ioat_read_dmacapability(ioat);

	xfercap = ioat_read_xfercap(ioat);
	ioat->max_xfer_size = 1 << xfercap;

	/* TODO: need to check DCA here if we ever do XOR/PQ */

	mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
	mtx_init(&ioat->cleanup_lock, "ioat_process_events", NULL, MTX_DEF);
	callout_init(&ioat->timer, 1);

	ioat->is_resize_pending = FALSE;
	ioat->is_completion_pending = FALSE;
	ioat->is_reset_pending = FALSE;
	ioat->is_channel_running = FALSE;
	ioat->is_waiting_for_ack = FALSE;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
	    &ioat->comp_update_tag);

	error = bus_dmamem_alloc(ioat->comp_update_tag,
	    (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map);
	if (ioat->comp_update == NULL)
		return (ENOMEM);

	error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
	    ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
	    0);
	if (error != 0)
		return (error);

	ioat->ring_size_order = IOAT_MIN_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), 0x40, 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct ioat_dma_hw_descriptor), 1,
	    sizeof(struct ioat_dma_hw_descriptor), 0, NULL, NULL,
	    &ioat->hw_desc_tag);

	ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,
	    M_ZERO | M_NOWAIT);
	if (ioat->ring == NULL)
		return (ENOMEM);

	ring = ioat->ring;
	for (i = 0; i < num_descriptors; i++) {
		ring[i] = ioat_alloc_ring_entry(ioat);
		if (ring[i] == NULL)
			return (ENOMEM);

		ring[i]->id = i;
	}

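	/*
	 * Chain the hardware descriptors into a circular list by bus address
	 * so the engine can walk the ring without software intervention.
	 */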
	for (i = 0; i < num_descriptors - 1; i++) {
		next = ring[i + 1];
		dma_hw_desc = ring[i]->u.dma;

		dma_hw_desc->next = next->hw_desc_bus_addr;
	}

	ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr;

	ioat->head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;

	error = ioat_reset_hw(ioat);
	if (error != 0)
		return (error);

	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ring[0]->hw_desc_bus_addr);
	return (0);
}

static int
ioat_map_pci_bar(struct ioat_softc *ioat)
{

	ioat->pci_resource_id = PCIR_BAR(0);
	ioat->pci_resource = bus_alloc_resource(ioat->device, SYS_RES_MEMORY,
	    &ioat->pci_resource_id, 0, ~0, 1, RF_ACTIVE);

	if (ioat->pci_resource == NULL) {
		ioat_log_message(0, "unable to allocate pci resource\n");
		return (ENODEV);
	}

	ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
	ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);
	return (0);
}

static void
ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ioat_softc *ioat = arg;

	ioat->comp_update_bus_addr = seg[0].ds_addr;
}

static void
ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	baddr = arg;
	*baddr = segs->ds_addr;
}

/*
 * Interrupt setup and handlers
 */
static int
ioat_setup_intr(struct ioat_softc *ioat)
{
	uint32_t num_vectors;
	int error;
	boolean_t use_msix;
	boolean_t force_legacy_interrupts;

	use_msix = FALSE;
	force_legacy_interrupts = FALSE;

	if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
		num_vectors = 1;
		pci_alloc_msix(ioat->device, &num_vectors);
		if (num_vectors == 1)
			use_msix = TRUE;
	}

	if (use_msix) {
		ioat->rid = 1;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_ACTIVE);
	} else {
		ioat->rid = 0;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_SHAREABLE | RF_ACTIVE);
	}
	if (ioat->res == NULL) {
		ioat_log_message(0, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ioat->tag = NULL;
	error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
	    INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
	if (error != 0) {
		ioat_log_message(0, "bus_setup_intr failed\n");
		return (error);
	}

	ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);
	return (0);
}

static boolean_t
ioat_model_resets_msix(struct ioat_softc *ioat)
{
	u_int32_t pciid;

	pciid = pci_get_devid(ioat->device);
	switch (pciid) {
		/* BWD: */
	case 0x0c508086:
	case 0x0c518086:
	case 0x0c528086:
	case 0x0c538086:
		/* BDXDE: */
	case 0x6f508086:
	case 0x6f518086:
	case 0x6f528086:
	case 0x6f538086:
		return (TRUE);
	}

	return (FALSE);
}

static void
ioat_interrupt_handler(void *arg)
{
	struct ioat_softc *ioat = arg;

	ioat_process_events(ioat);
}

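/*
 * Completion processing: walk the ring from the current software tail up to
 * the descriptor recorded in the channel's completion-update area, running
 * each descriptor's callback along the way.
 */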
static void
ioat_process_events(struct ioat_softc *ioat)
{
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t comp_update, status;
	uint32_t completed;

	mtx_lock(&ioat->cleanup_lock);

	completed = 0;
	comp_update = *ioat->comp_update;
	status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	ioat_log_message(3, "%s\n", __func__);

	if (status == ioat->last_seen) {
		mtx_unlock(&ioat->cleanup_lock);
		return;
	}

	while (1) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		ioat_log_message(3, "completing desc %d\n", ioat->tail);

		if (dmadesc->callback_fn)
			(*dmadesc->callback_fn)(dmadesc->callback_arg);

		ioat->tail++;
		if (desc->hw_desc_bus_addr == status)
			break;
	}

	ioat->last_seen = desc->hw_desc_bus_addr;

	if (ioat->head == ioat->tail) {
		ioat->is_completion_pending = FALSE;
		callout_reset(&ioat->timer, IOAT_INTR_TIMO,
		    ioat_timer_callback, ioat);
	}

	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	mtx_unlock(&ioat->cleanup_lock);
}

/*
 * User API functions
 */
bus_dmaengine_t
ioat_get_dmaengine(uint32_t index)
{

	if (index < ioat_channel_index)
		return (&ioat_channel[index]->dmaengine);
	return (NULL);
}

void
ioat_acquire(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	ioat_log_message(3, "%s\n", __func__);
}

void
ioat_release(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	ioat_log_message(3, "%s\n", __func__);
	ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET, (uint16_t)ioat->head);
	mtx_unlock(&ioat->submit_lock);
}

struct bus_dmadesc *
ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor *desc;
	struct ioat_dma_hw_descriptor *hw_desc;

	KASSERT((flags & ~DMA_ALL_FLAGS) == 0, ("Unrecognized flag(s): %#x",
		flags & ~DMA_ALL_FLAGS));

	ioat = to_ioat_softc(dmaengine);

	if (ioat_reserve_space_and_lock(ioat, 1) != 0)
		return (NULL);

	ioat_log_message(3, "%s\n", __func__);

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = desc->u.dma;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control.null = 1;
	hw_desc->u.control.completion_update = 1;

	if ((flags & DMA_INT_EN) != 0)
		hw_desc->u.control.int_enable = 1;

	hw_desc->size = 8;
	hw_desc->src_addr = 0;
	hw_desc->dest_addr = 0;

	desc->bus_dmadesc.callback_fn = callback_fn;
	desc->bus_dmadesc.callback_arg = callback_arg;

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
    bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_descriptor *desc;
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_softc *ioat;

	KASSERT((flags & ~DMA_ALL_FLAGS) == 0, ("Unrecognized flag(s): %#x",
		flags & ~DMA_ALL_FLAGS));

	ioat = to_ioat_softc(dmaengine);

	if (len > ioat->max_xfer_size) {
		ioat_log_message(0, "%s: max_xfer_size = %d, requested = %d\n",
		    __func__, ioat->max_xfer_size, (int)len);
		return (NULL);
	}

	if (ioat_reserve_space_and_lock(ioat, 1) != 0)
		return (NULL);

	ioat_log_message(3, "%s\n", __func__);

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = desc->u.dma;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control.completion_update = 1;

	if ((flags & DMA_INT_EN) != 0)
		hw_desc->u.control.int_enable = 1;

	hw_desc->size = len;
	hw_desc->src_addr = src;
	hw_desc->dest_addr = dst;

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	desc->bus_dmadesc.callback_fn = callback_fn;
	desc->bus_dmadesc.callback_arg = callback_arg;

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
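
/*
 * Example usage (a minimal sketch, not part of this driver): a consumer
 * would typically look up a channel, bracket submissions with
 * ioat_acquire()/ioat_release(), and sleep until its callback runs.  The
 * buffer names and the wakeup-based completion scheme below are illustrative
 * assumptions only; callers supply their own bus addresses and callback_arg.
 *
 *	static void
 *	example_copy_done(void *arg)
 *	{
 *
 *		wakeup(arg);
 *	}
 *
 *	bus_dmaengine_t dmaengine;
 *	struct bus_dmadesc *dmadesc;
 *	int done_cookie;
 *
 *	dmaengine = ioat_get_dmaengine(0);
 *	if (dmaengine == NULL)
 *		return (ENXIO);
 *
 *	ioat_acquire(dmaengine);
 *	dmadesc = ioat_copy(dmaengine, dst_busaddr, src_busaddr, len,
 *	    example_copy_done, &done_cookie, DMA_INT_EN);
 *	ioat_release(dmaengine);	(writes DMACOUNT to start the copy)
 *	if (dmadesc == NULL)
 *		return (ENOMEM);
 *
 *	tsleep(&done_cookie, 0, "ioatex", hz);
 */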

/*
 * Ring Management
 */
static inline uint32_t
ioat_get_active(struct ioat_softc *ioat)
{

	return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1));
}

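/*
 * Free slots in the ring.  One entry is kept unused so that a completely
 * full ring can still be told apart from an empty one (head == tail).
 */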
static inline uint32_t
ioat_get_ring_space(struct ioat_softc *ioat)
{

	return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1);
}

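/*
 * Allocate one software ring entry along with its DMA-mapped hardware
 * descriptor; on failure, partial allocations are released and NULL is
 * returned.
 */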
static struct ioat_descriptor *
ioat_alloc_ring_entry(struct ioat_softc *ioat)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	int error;

	error = ENOMEM;
	hw_desc = NULL;

	desc = malloc(sizeof(*desc), M_IOAT, M_NOWAIT);
	if (desc == NULL)
		goto out;

	bus_dmamem_alloc(ioat->hw_desc_tag, (void **)&hw_desc,
	    BUS_DMA_ZERO | BUS_DMA_NOWAIT, &ioat->hw_desc_map);
	if (hw_desc == NULL)
		goto out;

	desc->u.dma = hw_desc;

	error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
	    sizeof(*hw_desc), ioat_dmamap_cb, &desc->hw_desc_bus_addr,
	    BUS_DMA_NOWAIT);
	if (error)
		goto out;

out:
	if (error) {
		ioat_free_ring_entry(ioat, desc);
		return (NULL);
	}
	return (desc);
}

static void
ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc)
{

	if (desc == NULL)
		return;

	if (desc->u.dma)
		bus_dmamem_free(ioat->hw_desc_tag, desc->u.dma,
		    ioat->hw_desc_map);
	free(desc, M_IOAT);
}

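/*
 * Ensure at least num_descs free slots are available, growing the ring under
 * the cleanup lock if necessary.  Callers are expected to already hold the
 * submit lock (via ioat_acquire()).
 */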
static int
ioat_reserve_space_and_lock(struct ioat_softc *ioat, int num_descs)
{
	boolean_t retry;

	while (1) {
		if (ioat_get_ring_space(ioat) >= num_descs)
			return (0);

		mtx_lock(&ioat->cleanup_lock);
		retry = resize_ring(ioat, ioat->ring_size_order + 1);
		mtx_unlock(&ioat->cleanup_lock);

		if (!retry)
			return (ENOMEM);
	}
}

static struct ioat_descriptor *
ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
{

	return (ioat->ring[index % (1 << ioat->ring_size_order)]);
}

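/*
 * Grow or shrink both the software and hardware rings to 1 << order entries.
 * Called with the cleanup lock held; returns FALSE if the resize cannot be
 * performed (order out of range, too many active entries, or out of memory).
 */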
static boolean_t
resize_ring(struct ioat_softc *ioat, int order)
{
	struct ioat_descriptor **ring;
	struct ioat_descriptor *next;
	struct ioat_dma_hw_descriptor *hw;
	struct ioat_descriptor *ent;
	uint32_t current_size, active, new_size, i, new_idx, current_idx;
	uint32_t new_idx2;

	current_size = 1 << ioat->ring_size_order;
	active = (ioat->head - ioat->tail) & (current_size - 1);
	new_size = 1 << order;

	if (order > IOAT_MAX_ORDER)
		return (FALSE);

	/*
	 * when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return (FALSE);

	/* allocate the array to hold the software ring */
	ring = malloc(new_size * sizeof(*ring), M_IOAT, M_ZERO | M_NOWAIT);
	if (ring == NULL)
		return (FALSE);

	ioat_log_message(2, "ring resize: new: %d old: %d\n",
	    new_size, current_size);

	/* allocate/trim descriptors as needed */
	if (new_size > current_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < current_size; i++) {
			current_idx = (ioat->tail + i) & (current_size - 1);
			new_idx = (ioat->tail + i) & (new_size - 1);

			ring[new_idx] = ioat->ring[current_idx];
			ring[new_idx]->id = new_idx;
		}

		/* add new descriptors to the ring */
		for (i = current_size; i < new_size; i++) {
			new_idx = (ioat->tail + i) & (new_size - 1);

			ring[new_idx] = ioat_alloc_ring_entry(ioat);
			if (ring[new_idx] == NULL) {
				while (i--) {
					new_idx2 = (ioat->tail + i) &
					    (new_size - 1);

					ioat_free_ring_entry(ioat,
					    ring[new_idx2]);
				}
				free(ring, M_IOAT);
				return (FALSE);
			}
			ring[new_idx]->id = new_idx;
		}

		for (i = current_size - 1; i < new_size; i++) {
			new_idx = (ioat->tail + i) & (new_size - 1);
			next = ring[(new_idx + 1) & (new_size - 1)];
			hw = ring[new_idx]->u.dma;

			hw->next = next->hw_desc_bus_addr;
		}
	} else {
		/*
		 * copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			current_idx = (ioat->tail + i) & (current_size - 1);
			new_idx = (ioat->tail + i) & (new_size - 1);

			ring[new_idx] = ioat->ring[current_idx];
			ring[new_idx]->id = new_idx;
		}

		/* free deleted descriptors */
		for (i = new_size; i < current_size; i++) {
			ent = ioat_get_ring_entry(ioat, ioat->tail + i);
			ioat_free_ring_entry(ioat, ent);
		}

		/* fix up hardware ring */
		hw = ring[(ioat->tail + new_size - 1) & (new_size - 1)]->u.dma;
		next = ring[(ioat->tail + new_size) & (new_size - 1)];
		hw->next = next->hw_desc_bus_addr;
	}

	free(ioat->ring, M_IOAT);
	ioat->ring = ring;
	ioat->ring_size_order = order;

	return (TRUE);
}

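/*
 * Periodic housekeeping: while work is outstanding, poll for completions and
 * dump state if the channel has halted; once the channel is idle, shrink the
 * ring back toward IOAT_MIN_ORDER.
 */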
static void
ioat_timer_callback(void *arg)
{
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint64_t status;
	uint32_t chanerr;

	ioat = arg;
	ioat_log_message(1, "%s\n", __func__);

	if (ioat->is_completion_pending) {
		status = ioat_get_chansts(ioat);

		/*
		 * When halted due to errors, check for channel programming
		 * errors before advancing the completion state.
		 */
		if (is_ioat_halted(status)) {
			chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
			ioat_log_message(0, "Channel halted (%x)\n", chanerr);

			desc = ioat_get_ring_entry(ioat, ioat->tail + 0);
			dump_descriptor(desc->u.raw);

			desc = ioat_get_ring_entry(ioat, ioat->tail + 1);
			dump_descriptor(desc->u.raw);
		}
		ioat_process_events(ioat);
	} else {
		mtx_lock(&ioat->submit_lock);
		mtx_lock(&ioat->cleanup_lock);

		if (ioat_get_active(ioat) == 0 &&
		    ioat->ring_size_order > IOAT_MIN_ORDER)
			resize_ring(ioat, ioat->ring_size_order - 1);

		mtx_unlock(&ioat->cleanup_lock);
		mtx_unlock(&ioat->submit_lock);

		if (ioat->ring_size_order > IOAT_MIN_ORDER)
			callout_reset(&ioat->timer, IOAT_INTR_TIMO,
			    ioat_timer_callback, ioat);
	}
}

/*
 * Support Functions
 */
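/*
 * Account for one freshly written descriptor.  The doorbell (DMACOUNT) is
 * not written here; that happens in ioat_release() once the caller is done
 * queueing work.
 */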
static void
ioat_submit_single(struct ioat_softc *ioat)
{

	atomic_add_rel_int(&ioat->head, 1);

	if (!ioat->is_completion_pending) {
		ioat->is_completion_pending = TRUE;
		callout_reset(&ioat->timer, IOAT_INTR_TIMO,
		    ioat_timer_callback, ioat);
	}
}

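/*
 * Quiesce and reset the channel: suspend it if running, clear CHANERR,
 * apply the IOAT v3 CHANERRMASK_INT workaround, and issue the reset.  On
 * models whose reset clobbers MSI-X state, PCI config state is saved and
 * restored around the reset.
 */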
static int
ioat_reset_hw(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	int timeout;

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
	    timeout < 20; timeout++) {
		DELAY(1000);
		status = ioat_get_chansts(ioat);
	}
	if (timeout == 20)
		return (ETIMEDOUT);

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	/*
	 * IOAT v3 workaround - writing 3E07h to CHANERRMSK_INT masks out
	 * errors that can cause stability issues for IOAT v3.
	 */
	pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
	    4);
	chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
	pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);

	/*
	 * BDXDE and BWD models reset MSI-X registers on device reset.
	 * Save/restore their contents manually.
	 */
	if (ioat_model_resets_msix(ioat))
		pci_save_state(ioat->device);

	ioat_reset(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
		DELAY(1000);
	if (timeout == 20)
		return (ETIMEDOUT);

	if (ioat_model_resets_msix(ioat))
		pci_restore_state(ioat->device);

	return (0);
}

static void
dump_descriptor(void *hw_desc)
{
	int i, j;

	for (i = 0; i < 2; i++) {
		for (j = 0; j < 8; j++)
			printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]);
		printf("\n");
	}
}

static void
ioat_setup_sysctl(device_t device)
{
	struct sysctl_ctx_list *sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);
	sysctl_ctx = device_get_sysctl_ctx(device);
	sysctl_tree = device_get_sysctl_tree(device);

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "ring_size_order", CTLFLAG_RD, &ioat->ring_size_order,
	    0, "HW descriptor ring size order");
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "head", CTLFLAG_RD, &ioat->head,
	    0, "HW descriptor head pointer index");
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "tail", CTLFLAG_RD, &ioat->tail,
	    0, "HW descriptor tail pointer index");
}
1058