ioat.c revision 289910
/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/ioat/ioat.c 289910 2015-10-24 23:46:08Z cem $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#include "ioat.h"
#include "ioat_hw.h"
#include "ioat_internal.h"

#define	IOAT_INTR_TIMO	(hz / 10)
#define	IOAT_REFLK	(&ioat->submit_lock)

static int ioat_probe(device_t device);
static int ioat_attach(device_t device);
static int ioat_detach(device_t device);
static int ioat_setup_intr(struct ioat_softc *ioat);
static int ioat_teardown_intr(struct ioat_softc *ioat);
static int ioat3_attach(device_t device);
static int ioat3_selftest(struct ioat_softc *ioat);
static int ioat_map_pci_bar(struct ioat_softc *ioat);
static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void ioat_interrupt_handler(void *arg);
static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
static void ioat_process_events(struct ioat_softc *ioat);
static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
static void ioat_free_ring_entry(struct ioat_softc *ioat,
    struct ioat_descriptor *desc);
static struct ioat_descriptor *ioat_alloc_ring_entry(struct ioat_softc *ioat);
static int ioat_reserve_space_and_lock(struct ioat_softc *ioat, int num_descs);
static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *ioat,
    uint32_t index);
static boolean_t resize_ring(struct ioat_softc *ioat, int order);
static void ioat_timer_callback(void *arg);
static void dump_descriptor(void *hw_desc);
static void ioat_submit_single(struct ioat_softc *ioat);
static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
    int error);
static int ioat_reset_hw(struct ioat_softc *ioat);
static void ioat_setup_sysctl(device_t device);
static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS);
static inline struct ioat_softc *ioat_get(struct ioat_softc *,
    enum ioat_ref_kind);
static inline void ioat_put(struct ioat_softc *, enum ioat_ref_kind);
static inline void ioat_putn(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind);
static void ioat_drain(struct ioat_softc *);

#define	ioat_log_message(v, ...) do {					\
	if ((v) <= g_ioat_debug_level) {				\
		device_printf(ioat->device, __VA_ARGS__);		\
	}								\
} while (0)
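
/*
 * Note that the macro above expands ioat->device directly, so it can only
 * be used in scopes where a variable named "ioat" is visible.
 */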

MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node");

static int g_force_legacy_interrupts;
SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN,
    &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled");

int g_ioat_debug_level = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level,
    0, "Set log level (0-3) for ioat(4). Higher is more verbose.");

/*
 * OS <-> Driver interface structures
 */
static device_method_t ioat_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     ioat_probe),
	DEVMETHOD(device_attach,    ioat_attach),
	DEVMETHOD(device_detach,    ioat_detach),
	{ 0, 0 }
};

static driver_t ioat_pci_driver = {
	"ioat",
	ioat_pci_methods,
	sizeof(struct ioat_softc),
};

static devclass_t ioat_devclass;
DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0);

/*
 * Private data structures
 */
static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS];
static int ioat_channel_index = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0,
    "Number of IOAT channels attached");

static struct _pcsid
{
	u_int32_t   type;
	const char  *desc;
} pci_ids[] = {
	{ 0x34308086, "TBG IOAT Ch0" },
	{ 0x34318086, "TBG IOAT Ch1" },
	{ 0x34328086, "TBG IOAT Ch2" },
	{ 0x34338086, "TBG IOAT Ch3" },
	{ 0x34298086, "TBG IOAT Ch4" },
	{ 0x342a8086, "TBG IOAT Ch5" },
	{ 0x342b8086, "TBG IOAT Ch6" },
	{ 0x342c8086, "TBG IOAT Ch7" },

	{ 0x37108086, "JSF IOAT Ch0" },
	{ 0x37118086, "JSF IOAT Ch1" },
	{ 0x37128086, "JSF IOAT Ch2" },
	{ 0x37138086, "JSF IOAT Ch3" },
	{ 0x37148086, "JSF IOAT Ch4" },
	{ 0x37158086, "JSF IOAT Ch5" },
	{ 0x37168086, "JSF IOAT Ch6" },
	{ 0x37178086, "JSF IOAT Ch7" },
	{ 0x37188086, "JSF IOAT Ch0 (RAID)" },
	{ 0x37198086, "JSF IOAT Ch1 (RAID)" },

	{ 0x3c208086, "SNB IOAT Ch0" },
	{ 0x3c218086, "SNB IOAT Ch1" },
	{ 0x3c228086, "SNB IOAT Ch2" },
	{ 0x3c238086, "SNB IOAT Ch3" },
	{ 0x3c248086, "SNB IOAT Ch4" },
	{ 0x3c258086, "SNB IOAT Ch5" },
	{ 0x3c268086, "SNB IOAT Ch6" },
	{ 0x3c278086, "SNB IOAT Ch7" },
	{ 0x3c2e8086, "SNB IOAT Ch0 (RAID)" },
	{ 0x3c2f8086, "SNB IOAT Ch1 (RAID)" },

	{ 0x0e208086, "IVB IOAT Ch0" },
	{ 0x0e218086, "IVB IOAT Ch1" },
	{ 0x0e228086, "IVB IOAT Ch2" },
	{ 0x0e238086, "IVB IOAT Ch3" },
	{ 0x0e248086, "IVB IOAT Ch4" },
	{ 0x0e258086, "IVB IOAT Ch5" },
	{ 0x0e268086, "IVB IOAT Ch6" },
	{ 0x0e278086, "IVB IOAT Ch7" },
	{ 0x0e2e8086, "IVB IOAT Ch0 (RAID)" },
	{ 0x0e2f8086, "IVB IOAT Ch1 (RAID)" },

	{ 0x2f208086, "HSW IOAT Ch0" },
	{ 0x2f218086, "HSW IOAT Ch1" },
	{ 0x2f228086, "HSW IOAT Ch2" },
	{ 0x2f238086, "HSW IOAT Ch3" },
	{ 0x2f248086, "HSW IOAT Ch4" },
	{ 0x2f258086, "HSW IOAT Ch5" },
	{ 0x2f268086, "HSW IOAT Ch6" },
	{ 0x2f278086, "HSW IOAT Ch7" },
	{ 0x2f2e8086, "HSW IOAT Ch0 (RAID)" },
	{ 0x2f2f8086, "HSW IOAT Ch1 (RAID)" },

	{ 0x0c508086, "BWD IOAT Ch0" },
	{ 0x0c518086, "BWD IOAT Ch1" },
	{ 0x0c528086, "BWD IOAT Ch2" },
	{ 0x0c538086, "BWD IOAT Ch3" },

	{ 0x6f508086, "BDXDE IOAT Ch0" },
	{ 0x6f518086, "BDXDE IOAT Ch1" },
	{ 0x6f528086, "BDXDE IOAT Ch2" },
	{ 0x6f538086, "BDXDE IOAT Ch3" },

	{ 0x00000000, NULL           }
};

/*
 * OS <-> Driver linkage functions
 */
static int
ioat_probe(device_t device)
{
	struct _pcsid *ep;
	u_int32_t type;

	type = pci_get_devid(device);
	for (ep = pci_ids; ep->type; ep++) {
		if (ep->type == type) {
			device_set_desc(device, ep->desc);
			return (0);
		}
	}
	return (ENXIO);
}

static int
ioat_attach(device_t device)
{
	struct ioat_softc *ioat;
	int error;

	ioat = DEVICE2SOFTC(device);
	ioat->device = device;

	error = ioat_map_pci_bar(ioat);
	if (error != 0)
		goto err;

	ioat->version = ioat_read_cbver(ioat);
	if (ioat->version < IOAT_VER_3_0) {
		error = ENODEV;
		goto err;
	}

	error = ioat3_attach(device);
	if (error != 0)
		goto err;

	error = pci_enable_busmaster(device);
	if (error != 0)
		goto err;

	error = ioat_setup_intr(ioat);
	if (error != 0)
		goto err;

	error = ioat3_selftest(ioat);
	if (error != 0)
		goto err;

	ioat_process_events(ioat);
	ioat_setup_sysctl(device);

	ioat_channel[ioat_channel_index++] = ioat;
	ioat_test_attach();

err:
	if (error != 0)
		ioat_detach(device);
	return (error);
}

static int
ioat_detach(device_t device)
{
	struct ioat_softc *ioat;
	uint32_t i;

	ioat = DEVICE2SOFTC(device);

	ioat_test_detach();
	ioat_drain(ioat);

	ioat_teardown_intr(ioat);
	callout_drain(&ioat->timer);

	pci_disable_busmaster(device);

	if (ioat->pci_resource != NULL)
		bus_release_resource(device, SYS_RES_MEMORY,
		    ioat->pci_resource_id, ioat->pci_resource);

	if (ioat->ring != NULL) {
		for (i = 0; i < (1 << ioat->ring_size_order); i++)
			ioat_free_ring_entry(ioat, ioat->ring[i]);
		free(ioat->ring, M_IOAT);
	}

	if (ioat->comp_update != NULL) {
		bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
		bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
		    ioat->comp_update_map);
		bus_dma_tag_destroy(ioat->comp_update_tag);
	}

	bus_dma_tag_destroy(ioat->hw_desc_tag);

	return (0);
}

static int
ioat_teardown_intr(struct ioat_softc *ioat)
{

	if (ioat->tag != NULL)
		bus_teardown_intr(ioat->device, ioat->res, ioat->tag);

	if (ioat->res != NULL)
		bus_release_resource(ioat->device, SYS_RES_IRQ,
		    rman_get_rid(ioat->res), ioat->res);

	pci_release_msi(ioat->device);
	return (0);
}

static int
ioat3_selftest(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	int i;

	ioat_acquire(&ioat->dmaengine);
	ioat_null(&ioat->dmaengine, NULL, NULL, 0);
	ioat_release(&ioat->dmaengine);

	for (i = 0; i < 100; i++) {
		DELAY(1);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status))
			return (0);
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_log_message(0, "could not start channel: "
	    "status = %#jx error = %x\n", (uintmax_t)status, chanerr);
	return (ENXIO);
}

/*
 * Initialize Hardware
 */
static int
ioat3_attach(device_t device)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor **ring;
	struct ioat_descriptor *next;
	struct ioat_dma_hw_descriptor *dma_hw_desc;
	uint32_t capabilities;
	int i, num_descriptors;
	int error;
	uint8_t xfercap;

	error = 0;
	ioat = DEVICE2SOFTC(device);
	capabilities = ioat_read_dmacapability(ioat);

	xfercap = ioat_read_xfercap(ioat);
	ioat->max_xfer_size = 1 << xfercap;

	/* TODO: need to check DCA here if we ever do XOR/PQ */

	mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
	mtx_init(&ioat->cleanup_lock, "ioat_process_events", NULL, MTX_DEF);
	callout_init(&ioat->timer, 1);

	ioat->is_resize_pending = FALSE;
	ioat->is_completion_pending = FALSE;
	ioat->is_reset_pending = FALSE;
	ioat->is_channel_running = FALSE;
	ioat->is_waiting_for_ack = FALSE;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
	    &ioat->comp_update_tag);

	error = bus_dmamem_alloc(ioat->comp_update_tag,
	    (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map);
	if (ioat->comp_update == NULL)
		return (ENOMEM);

	error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
	    ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
	    0);
	if (error != 0)
		return (error);

	ioat->ring_size_order = IOAT_MIN_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), 0x40, 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct ioat_dma_hw_descriptor), 1,
	    sizeof(struct ioat_dma_hw_descriptor), 0, NULL, NULL,
	    &ioat->hw_desc_tag);

	ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,
	    M_ZERO | M_NOWAIT);
	if (ioat->ring == NULL)
		return (ENOMEM);

	ring = ioat->ring;
	for (i = 0; i < num_descriptors; i++) {
		ring[i] = ioat_alloc_ring_entry(ioat);
		if (ring[i] == NULL)
			return (ENOMEM);

		ring[i]->id = i;
	}

	for (i = 0; i < num_descriptors - 1; i++) {
		next = ring[i + 1];
		dma_hw_desc = ring[i]->u.dma;

		dma_hw_desc->next = next->hw_desc_bus_addr;
	}

	ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr;

	ioat->head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;

	error = ioat_reset_hw(ioat);
	if (error != 0)
		return (error);

	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ring[0]->hw_desc_bus_addr);
	return (0);
}

static int
ioat_map_pci_bar(struct ioat_softc *ioat)
{

	ioat->pci_resource_id = PCIR_BAR(0);
	ioat->pci_resource = bus_alloc_resource(ioat->device, SYS_RES_MEMORY,
	    &ioat->pci_resource_id, 0, ~0, 1, RF_ACTIVE);

	if (ioat->pci_resource == NULL) {
		ioat_log_message(0, "unable to allocate pci resource\n");
		return (ENODEV);
	}

	ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
	ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);
	return (0);
}

static void
ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ioat_softc *ioat = arg;

	ioat->comp_update_bus_addr = seg[0].ds_addr;
}

static void
ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	baddr = arg;
	*baddr = segs->ds_addr;
}

/*
 * Interrupt setup and handlers
 */
static int
ioat_setup_intr(struct ioat_softc *ioat)
{
	uint32_t num_vectors;
	int error;
	boolean_t use_msix;
	boolean_t force_legacy_interrupts;

	use_msix = FALSE;
	force_legacy_interrupts = FALSE;

	if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
		num_vectors = 1;
		pci_alloc_msix(ioat->device, &num_vectors);
		if (num_vectors == 1)
			use_msix = TRUE;
	}

	if (use_msix) {
		ioat->rid = 1;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_ACTIVE);
	} else {
		ioat->rid = 0;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_SHAREABLE | RF_ACTIVE);
	}
	if (ioat->res == NULL) {
		ioat_log_message(0, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ioat->tag = NULL;
	error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
	    INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
	if (error != 0) {
		ioat_log_message(0, "bus_setup_intr failed\n");
		return (error);
	}

	ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);
	return (0);
}

static boolean_t
ioat_model_resets_msix(struct ioat_softc *ioat)
{
	u_int32_t pciid;

	pciid = pci_get_devid(ioat->device);
	switch (pciid) {
		/* BWD: */
	case 0x0c508086:
	case 0x0c518086:
	case 0x0c528086:
	case 0x0c538086:
		/* BDXDE: */
	case 0x6f508086:
	case 0x6f518086:
	case 0x6f528086:
	case 0x6f538086:
		return (TRUE);
	}

	return (FALSE);
}

static void
ioat_interrupt_handler(void *arg)
{
	struct ioat_softc *ioat = arg;

	ioat_process_events(ioat);
}

static void
ioat_process_events(struct ioat_softc *ioat)
{
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t comp_update, status;
	uint32_t completed;

	mtx_lock(&ioat->cleanup_lock);

	completed = 0;
	comp_update = *ioat->comp_update;
	status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	ioat_log_message(3, "%s\n", __func__);

	if (status == ioat->last_seen)
		goto out;

	while (1) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		ioat_log_message(3, "completing desc %d\n", ioat->tail);

		if (dmadesc->callback_fn)
			(*dmadesc->callback_fn)(dmadesc->callback_arg);

		completed++;
		ioat->tail++;
		if (desc->hw_desc_bus_addr == status)
			break;
	}

	ioat->last_seen = desc->hw_desc_bus_addr;

	if (ioat->head == ioat->tail) {
		ioat->is_completion_pending = FALSE;
		callout_reset(&ioat->timer, IOAT_INTR_TIMO,
		    ioat_timer_callback, ioat);
	}

out:
	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	mtx_unlock(&ioat->cleanup_lock);

	ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF);
}

/*
 * User API functions
 */
bus_dmaengine_t
ioat_get_dmaengine(uint32_t index)
{

	if (index >= ioat_channel_index)
		return (NULL);
	return (&ioat_get(ioat_channel[index], IOAT_DMAENGINE_REF)->dmaengine);
}

void
ioat_put_dmaengine(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	ioat_put(ioat, IOAT_DMAENGINE_REF);
}

void
ioat_acquire(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	ioat_log_message(3, "%s\n", __func__);
}

void
ioat_release(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	ioat_log_message(3, "%s\n", __func__);
	ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET, (uint16_t)ioat->head);
	mtx_unlock(&ioat->submit_lock);
}
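
/*
 * Note that the DMACOUNT write above is what hands newly queued
 * descriptors to the hardware; ioat_null() and ioat_copy() below only
 * fill in ring entries and advance the software head.
 */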

struct bus_dmadesc *
ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor *desc;
	struct ioat_dma_hw_descriptor *hw_desc;

	KASSERT((flags & ~DMA_ALL_FLAGS) == 0, ("Unrecognized flag(s): %#x",
		flags & ~DMA_ALL_FLAGS));

	ioat = to_ioat_softc(dmaengine);
	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (ioat_reserve_space_and_lock(ioat, 1) != 0)
		return (NULL);

	ioat_log_message(3, "%s\n", __func__);

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = desc->u.dma;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control.null = 1;
	hw_desc->u.control.completion_update = 1;

	if ((flags & DMA_INT_EN) != 0)
		hw_desc->u.control.int_enable = 1;

	hw_desc->size = 8;
	hw_desc->src_addr = 0;
	hw_desc->dest_addr = 0;

	desc->bus_dmadesc.callback_fn = callback_fn;
	desc->bus_dmadesc.callback_arg = callback_arg;

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
    bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_descriptor *desc;
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_softc *ioat;

	KASSERT((flags & ~DMA_ALL_FLAGS) == 0, ("Unrecognized flag(s): %#x",
		flags & ~DMA_ALL_FLAGS));

	ioat = to_ioat_softc(dmaengine);
	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (len > ioat->max_xfer_size) {
		ioat_log_message(0, "%s: max_xfer_size = %d, requested = %d\n",
		    __func__, ioat->max_xfer_size, (int)len);
		return (NULL);
	}

	if (ioat_reserve_space_and_lock(ioat, 1) != 0)
		return (NULL);

	ioat_log_message(3, "%s\n", __func__);

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = desc->u.dma;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control.completion_update = 1;

	if ((flags & DMA_INT_EN) != 0)
		hw_desc->u.control.int_enable = 1;

	hw_desc->size = len;
	hw_desc->src_addr = src;
	hw_desc->dest_addr = dst;

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	desc->bus_dmadesc.callback_fn = callback_fn;
	desc->bus_dmadesc.callback_arg = callback_arg;

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

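/*
 * Example usage of the user API above (an illustrative sketch, not part
 * of the driver): a consumer brackets one or more submissions in
 * ioat_acquire()/ioat_release(), and the callback runs from
 * ioat_process_events() once the hardware reports the descriptor
 * complete.  The names copy_done, token, dst_busaddr, src_busaddr and
 * len are hypothetical; the addresses are bus addresses, e.g. obtained
 * via bus_dmamap_load().
 *
 *	static int token;
 *
 *	static void
 *	copy_done(void *arg)
 *	{
 *		wakeup(arg);
 *	}
 *
 *	bus_dmaengine_t dmaengine;
 *
 *	dmaengine = ioat_get_dmaengine(0);
 *	if (dmaengine != NULL) {
 *		ioat_acquire(dmaengine);
 *		if (ioat_copy(dmaengine, dst_busaddr, src_busaddr, len,
 *		    copy_done, &token, DMA_INT_EN) == NULL)
 *			printf("ioat copy was not queued\n");
 *		ioat_release(dmaengine);
 *		ioat_put_dmaengine(dmaengine);
 *	}
 */
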
/*
 * Ring Management
 */
static inline uint32_t
ioat_get_active(struct ioat_softc *ioat)
{

	return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1));
}

static inline uint32_t
ioat_get_ring_space(struct ioat_softc *ioat)
{

	return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1);
}
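
/*
 * Worked example of the mask arithmetic above (illustrative values): with
 * ring_size_order = 2 (a 4-entry ring), head = 5 and tail = 3 give
 * active = (5 - 3) & 3 = 2 and space = 4 - 2 - 1 = 1.  One slot is kept
 * in reserve so that a full ring remains distinguishable from an empty
 * one when head and tail alias modulo the ring size.
 */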

static struct ioat_descriptor *
ioat_alloc_ring_entry(struct ioat_softc *ioat)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	int error;

	error = ENOMEM;
	hw_desc = NULL;

	desc = malloc(sizeof(*desc), M_IOAT, M_NOWAIT);
	if (desc == NULL)
		goto out;

	bus_dmamem_alloc(ioat->hw_desc_tag, (void **)&hw_desc,
	    BUS_DMA_ZERO | BUS_DMA_NOWAIT, &ioat->hw_desc_map);
	if (hw_desc == NULL)
		goto out;

	desc->u.dma = hw_desc;

	error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
	    sizeof(*hw_desc), ioat_dmamap_cb, &desc->hw_desc_bus_addr,
	    BUS_DMA_NOWAIT);
	if (error)
		goto out;

out:
	if (error) {
		ioat_free_ring_entry(ioat, desc);
		return (NULL);
	}
	return (desc);
}

static void
ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc)
{

	if (desc == NULL)
		return;

	if (desc->u.dma)
		bus_dmamem_free(ioat->hw_desc_tag, desc->u.dma,
		    ioat->hw_desc_map);
	free(desc, M_IOAT);
}

static int
ioat_reserve_space_and_lock(struct ioat_softc *ioat, int num_descs)
{
	boolean_t retry;

	while (1) {
		if (ioat_get_ring_space(ioat) >= num_descs)
			return (0);

		mtx_lock(&ioat->cleanup_lock);
		retry = resize_ring(ioat, ioat->ring_size_order + 1);
		mtx_unlock(&ioat->cleanup_lock);

		if (!retry)
			return (ENOMEM);
	}
}

static struct ioat_descriptor *
ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
{

	return (ioat->ring[index % (1 << ioat->ring_size_order)]);
}

static boolean_t
resize_ring(struct ioat_softc *ioat, int order)
{
	struct ioat_descriptor **ring;
	struct ioat_descriptor *next;
	struct ioat_dma_hw_descriptor *hw;
	struct ioat_descriptor *ent;
	uint32_t current_size, active, new_size, i, new_idx, current_idx;
	uint32_t new_idx2;

	current_size = 1 << ioat->ring_size_order;
	active = (ioat->head - ioat->tail) & (current_size - 1);
	new_size = 1 << order;

	if (order > IOAT_MAX_ORDER)
		return (FALSE);

	/*
	 * when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return (FALSE);

	/* allocate the array to hold the software ring */
	ring = malloc(new_size * sizeof(*ring), M_IOAT, M_ZERO | M_NOWAIT);
	if (ring == NULL)
		return (FALSE);

	ioat_log_message(2, "ring resize: new: %d old: %d\n",
	    new_size, current_size);

	/* allocate/trim descriptors as needed */
	if (new_size > current_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < current_size; i++) {
			current_idx = (ioat->tail + i) & (current_size - 1);
			new_idx = (ioat->tail + i) & (new_size - 1);

			ring[new_idx] = ioat->ring[current_idx];
			ring[new_idx]->id = new_idx;
		}

		/* add new descriptors to the ring */
		for (i = current_size; i < new_size; i++) {
			new_idx = (ioat->tail + i) & (new_size - 1);

			ring[new_idx] = ioat_alloc_ring_entry(ioat);
			if (ring[new_idx] == NULL) {
				while (i--) {
					new_idx2 = (ioat->tail + i) &
					    (new_size - 1);

					ioat_free_ring_entry(ioat,
					    ring[new_idx2]);
				}
				free(ring, M_IOAT);
				return (FALSE);
			}
			ring[new_idx]->id = new_idx;
		}

		for (i = current_size - 1; i < new_size; i++) {
			new_idx = (ioat->tail + i) & (new_size - 1);
			next = ring[(new_idx + 1) & (new_size - 1)];
			hw = ring[new_idx]->u.dma;

			hw->next = next->hw_desc_bus_addr;
		}
	} else {
		/*
		 * copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			current_idx = (ioat->tail + i) & (current_size - 1);
			new_idx = (ioat->tail + i) & (new_size - 1);

			ring[new_idx] = ioat->ring[current_idx];
			ring[new_idx]->id = new_idx;
		}

		/* free deleted descriptors */
		for (i = new_size; i < current_size; i++) {
			ent = ioat_get_ring_entry(ioat, ioat->tail + i);
			ioat_free_ring_entry(ioat, ent);
		}

		/* fix up hardware ring */
		hw = ring[(ioat->tail + new_size - 1) & (new_size - 1)]->u.dma;
		next = ring[(ioat->tail + new_size) & (new_size - 1)];
		hw->next = next->hw_desc_bus_addr;
	}

	free(ioat->ring, M_IOAT);
	ioat->ring = ring;
	ioat->ring_size_order = order;

	return (TRUE);
}

static void
ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr)
{
	struct ioat_descriptor *desc;

	ioat_log_message(0, "Channel halted (%x)\n", chanerr);
	if (chanerr == 0)
		return;

	desc = ioat_get_ring_entry(ioat, ioat->tail + 0);
	dump_descriptor(desc->u.raw);

	desc = ioat_get_ring_entry(ioat, ioat->tail + 1);
	dump_descriptor(desc->u.raw);
}

static void
ioat_timer_callback(void *arg)
{
	struct ioat_softc *ioat;
	uint64_t status;
	uint32_t chanerr;

	ioat = arg;
	ioat_log_message(1, "%s\n", __func__);

	if (ioat->is_completion_pending) {
		status = ioat_get_chansts(ioat);

		/*
		 * When halted due to errors, check for channel programming
		 * errors before advancing the completion state.
		 */
		if (is_ioat_halted(status)) {
			chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
			ioat_halted_debug(ioat, chanerr);
		}
		ioat_process_events(ioat);
	} else {
		mtx_lock(&ioat->submit_lock);
		mtx_lock(&ioat->cleanup_lock);

		if (ioat_get_active(ioat) == 0 &&
		    ioat->ring_size_order > IOAT_MIN_ORDER)
			resize_ring(ioat, ioat->ring_size_order - 1);

		mtx_unlock(&ioat->cleanup_lock);
		mtx_unlock(&ioat->submit_lock);

		if (ioat->ring_size_order > IOAT_MIN_ORDER)
			callout_reset(&ioat->timer, IOAT_INTR_TIMO,
			    ioat_timer_callback, ioat);
	}
}

/*
 * Support Functions
 */
static void
ioat_submit_single(struct ioat_softc *ioat)
{

	ioat_get(ioat, IOAT_ACTIVE_DESCR_REF);
	atomic_add_rel_int(&ioat->head, 1);

	if (!ioat->is_completion_pending) {
		ioat->is_completion_pending = TRUE;
		callout_reset(&ioat->timer, IOAT_INTR_TIMO,
		    ioat_timer_callback, ioat);
	}
}
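
/*
 * Note that head is advanced with a release atomic: the hardware
 * descriptor stores performed by the caller become visible no later than
 * the head update, which ioat_release() then publishes to the device via
 * the DMACOUNT register.
 */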

static int
ioat_reset_hw(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	int timeout;

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
	    timeout < 20; timeout++) {
		DELAY(1000);
		status = ioat_get_chansts(ioat);
	}
	if (timeout == 20)
		return (ETIMEDOUT);

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	/*
	 * IOAT v3 workaround - write CHANERRMSK_INT with 3E07h to mask out
	 * errors that can cause stability issues for IOAT v3.
	 */
	pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
	    4);
	chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
	pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);

	/*
	 * BDXDE and BWD models reset MSI-X registers on device reset.
	 * Save/restore their contents manually.
	 */
	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets MSI-X registers; saving\n");
		pci_save_state(ioat->device);
	}

	ioat_reset(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
		DELAY(1000);
	if (timeout == 20)
		return (ETIMEDOUT);

	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1,
		    "device resets MSI-X registers; restoring\n");
		pci_restore_state(ioat->device);
	}

	return (0);
}

static int
sysctl_handle_reset(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	int error, arg;

	ioat = arg1;

	arg = 0;
	error = SYSCTL_OUT(req, &arg, sizeof(arg));
	if (error != 0 || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &arg, sizeof(arg));
	if (error != 0)
		return (error);

	if (arg != 0)
		error = ioat_reset_hw(ioat);

	return (error);
}

static void
dump_descriptor(void *hw_desc)
{
	int i, j;

	for (i = 0; i < 2; i++) {
		for (j = 0; j < 8; j++)
			printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]);
		printf("\n");
	}
}

static void
ioat_setup_sysctl(device_t device)
{
	struct sysctl_oid_list *par;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);
	ctx = device_get_sysctl_ctx(device);
	tree = device_get_sysctl_tree(device);
	par = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "ring_size_order", CTLFLAG_RD,
	    &ioat->ring_size_order, 0, "HW descriptor ring size order");
	SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "head", CTLFLAG_RD, &ioat->head, 0,
	    "HW descriptor head pointer index");
	SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail, 0,
	    "HW descriptor tail pointer index");

	SYSCTL_ADD_PROC(ctx, par, OID_AUTO, "force_hw_reset",
	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I",
	    "Set to non-zero to reset the hardware");
}
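
/*
 * These OIDs hang off the device sysctl tree, so they surface to userland
 * as dev.ioat.<unit>.*; e.g., a reset of the first channel could be
 * requested with "sysctl dev.ioat.0.force_hw_reset=1" (illustrative
 * command; the unit number depends on probe order).
 */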

static inline struct ioat_softc *
ioat_get(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{
	uint32_t old;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	old = atomic_fetchadd_32(&ioat->refcnt, 1);
	KASSERT(old < UINT32_MAX, ("refcnt overflow"));

#ifdef INVARIANTS
	old = atomic_fetchadd_32(&ioat->refkinds[kind], 1);
	KASSERT(old < UINT32_MAX, ("refcnt kind overflow"));
#endif

	return (ioat);
}

static inline void
ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
{
	uint32_t old;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	if (n == 0)
		return;

#ifdef INVARIANTS
	old = atomic_fetchadd_32(&ioat->refkinds[kind], -n);
	KASSERT(old >= n, ("refcnt kind underflow"));
#endif

	/* Skip acquiring the lock if resulting refcnt > 0. */
	for (;;) {
		old = ioat->refcnt;
		if (old <= n)
			break;
		if (atomic_cmpset_32(&ioat->refcnt, old, old - n))
			return;
	}

	mtx_lock(IOAT_REFLK);
	old = atomic_fetchadd_32(&ioat->refcnt, -n);
	KASSERT(old >= n, ("refcnt error"));

	if (old == n)
		wakeup(IOAT_REFLK);
	mtx_unlock(IOAT_REFLK);
}
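
/*
 * The final reference is dropped while holding IOAT_REFLK so that the
 * wakeup() above cannot race with the refcnt check in ioat_drain() below;
 * releases that leave the count above zero take the lock-free cmpset path
 * instead.
 */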

static inline void
ioat_put(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{

	ioat_putn(ioat, 1, kind);
}

static void
ioat_drain(struct ioat_softc *ioat)
{

	mtx_lock(IOAT_REFLK);
	while (ioat->refcnt > 0)
		msleep(IOAT_REFLK, IOAT_REFLK, 0, "ioat_drain", 0);
	mtx_unlock(IOAT_REFLK);
}