/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/ioat/ioat.c 289911 2015-10-24 23:46:20Z cem $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#include "ioat.h"
#include "ioat_hw.h"
#include "ioat_internal.h"

#define	IOAT_INTR_TIMO	(hz / 10)
#define	IOAT_REFLK	(&ioat->submit_lock)

static int ioat_probe(device_t device);
static int ioat_attach(device_t device);
static int ioat_detach(device_t device);
static int ioat_setup_intr(struct ioat_softc *ioat);
static int ioat_teardown_intr(struct ioat_softc *ioat);
static int ioat3_attach(device_t device);
static int ioat3_selftest(struct ioat_softc *ioat);
static int ioat_map_pci_bar(struct ioat_softc *ioat);
static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void ioat_interrupt_handler(void *arg);
static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
static void ioat_process_events(struct ioat_softc *ioat);
static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
static void ioat_free_ring_entry(struct ioat_softc *ioat,
    struct ioat_descriptor *desc);
static struct ioat_descriptor *ioat_alloc_ring_entry(struct ioat_softc *ioat);
static int ioat_reserve_space_and_lock(struct ioat_softc *ioat, int num_descs);
static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *ioat,
    uint32_t index);
static boolean_t resize_ring(struct ioat_softc *ioat, int order);
static void ioat_timer_callback(void *arg);
static void dump_descriptor(void *hw_desc);
static void ioat_submit_single(struct ioat_softc *ioat);
static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
    int error);
static int ioat_reset_hw(struct ioat_softc *ioat);
static void ioat_setup_sysctl(device_t device);
static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS);
static inline struct ioat_softc *ioat_get(struct ioat_softc *,
    enum ioat_ref_kind);
static inline void ioat_put(struct ioat_softc *, enum ioat_ref_kind);
static inline void ioat_putn(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind);
static void ioat_drain(struct ioat_softc *);

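/*
 * Log at the given verbosity level.  The macro expects a local "ioat" softc
 * pointer to be in scope at the call site.
 */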
#define	ioat_log_message(v, ...) do {					\
	if ((v) <= g_ioat_debug_level) {				\
		device_printf(ioat->device, __VA_ARGS__);		\
	}								\
} while (0)

MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node");

static int g_force_legacy_interrupts;
SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN,
    &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled");

int g_ioat_debug_level = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level,
    0, "Set log level (0-3) for ioat(4). Higher is more verbose.");

/*
 * OS <-> Driver interface structures
 */
static device_method_t ioat_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     ioat_probe),
	DEVMETHOD(device_attach,    ioat_attach),
	DEVMETHOD(device_detach,    ioat_detach),
	{ 0, 0 }
};

static driver_t ioat_pci_driver = {
	"ioat",
	ioat_pci_methods,
	sizeof(struct ioat_softc),
};

static devclass_t ioat_devclass;
DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0);

/*
 * Private data structures
 */
static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS];
static int ioat_channel_index = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0,
    "Number of IOAT channels attached");

static struct _pcsid
{
	u_int32_t   type;
	const char  *desc;
} pci_ids[] = {
	{ 0x34308086, "TBG IOAT Ch0" },
	{ 0x34318086, "TBG IOAT Ch1" },
	{ 0x34328086, "TBG IOAT Ch2" },
	{ 0x34338086, "TBG IOAT Ch3" },
	{ 0x34298086, "TBG IOAT Ch4" },
	{ 0x342a8086, "TBG IOAT Ch5" },
	{ 0x342b8086, "TBG IOAT Ch6" },
	{ 0x342c8086, "TBG IOAT Ch7" },

	{ 0x37108086, "JSF IOAT Ch0" },
	{ 0x37118086, "JSF IOAT Ch1" },
	{ 0x37128086, "JSF IOAT Ch2" },
	{ 0x37138086, "JSF IOAT Ch3" },
	{ 0x37148086, "JSF IOAT Ch4" },
	{ 0x37158086, "JSF IOAT Ch5" },
	{ 0x37168086, "JSF IOAT Ch6" },
	{ 0x37178086, "JSF IOAT Ch7" },
	{ 0x37188086, "JSF IOAT Ch0 (RAID)" },
	{ 0x37198086, "JSF IOAT Ch1 (RAID)" },

	{ 0x3c208086, "SNB IOAT Ch0" },
	{ 0x3c218086, "SNB IOAT Ch1" },
	{ 0x3c228086, "SNB IOAT Ch2" },
	{ 0x3c238086, "SNB IOAT Ch3" },
	{ 0x3c248086, "SNB IOAT Ch4" },
	{ 0x3c258086, "SNB IOAT Ch5" },
	{ 0x3c268086, "SNB IOAT Ch6" },
	{ 0x3c278086, "SNB IOAT Ch7" },
	{ 0x3c2e8086, "SNB IOAT Ch0 (RAID)" },
	{ 0x3c2f8086, "SNB IOAT Ch1 (RAID)" },

	{ 0x0e208086, "IVB IOAT Ch0" },
	{ 0x0e218086, "IVB IOAT Ch1" },
	{ 0x0e228086, "IVB IOAT Ch2" },
	{ 0x0e238086, "IVB IOAT Ch3" },
	{ 0x0e248086, "IVB IOAT Ch4" },
	{ 0x0e258086, "IVB IOAT Ch5" },
	{ 0x0e268086, "IVB IOAT Ch6" },
	{ 0x0e278086, "IVB IOAT Ch7" },
	{ 0x0e2e8086, "IVB IOAT Ch0 (RAID)" },
	{ 0x0e2f8086, "IVB IOAT Ch1 (RAID)" },

	{ 0x2f208086, "HSW IOAT Ch0" },
	{ 0x2f218086, "HSW IOAT Ch1" },
	{ 0x2f228086, "HSW IOAT Ch2" },
	{ 0x2f238086, "HSW IOAT Ch3" },
	{ 0x2f248086, "HSW IOAT Ch4" },
	{ 0x2f258086, "HSW IOAT Ch5" },
	{ 0x2f268086, "HSW IOAT Ch6" },
	{ 0x2f278086, "HSW IOAT Ch7" },
	{ 0x2f2e8086, "HSW IOAT Ch0 (RAID)" },
	{ 0x2f2f8086, "HSW IOAT Ch1 (RAID)" },

	{ 0x0c508086, "BWD IOAT Ch0" },
	{ 0x0c518086, "BWD IOAT Ch1" },
	{ 0x0c528086, "BWD IOAT Ch2" },
	{ 0x0c538086, "BWD IOAT Ch3" },

	{ 0x6f508086, "BDXDE IOAT Ch0" },
	{ 0x6f518086, "BDXDE IOAT Ch1" },
	{ 0x6f528086, "BDXDE IOAT Ch2" },
	{ 0x6f538086, "BDXDE IOAT Ch3" },

	{ 0x00000000, NULL           }
};

/*
 * OS <-> Driver linkage functions
 */
static int
ioat_probe(device_t device)
{
	struct _pcsid *ep;
	u_int32_t type;

	type = pci_get_devid(device);
	for (ep = pci_ids; ep->type; ep++) {
		if (ep->type == type) {
			device_set_desc(device, ep->desc);
			return (0);
		}
	}
	return (ENXIO);
}

static int
ioat_attach(device_t device)
{
	struct ioat_softc *ioat;
	int error;

	ioat = DEVICE2SOFTC(device);
	ioat->device = device;

	error = ioat_map_pci_bar(ioat);
	if (error != 0)
		goto err;

	ioat->version = ioat_read_cbver(ioat);
	if (ioat->version < IOAT_VER_3_0) {
		error = ENODEV;
		goto err;
	}

	error = ioat3_attach(device);
	if (error != 0)
		goto err;

	error = pci_enable_busmaster(device);
	if (error != 0)
		goto err;

	error = ioat_setup_intr(ioat);
	if (error != 0)
		goto err;

	error = ioat3_selftest(ioat);
	if (error != 0)
		goto err;

	ioat_process_events(ioat);
	ioat_setup_sysctl(device);

	ioat_channel[ioat_channel_index++] = ioat;
	ioat_test_attach();

err:
	if (error != 0)
		ioat_detach(device);
	return (error);
}

static int
ioat_detach(device_t device)
{
	struct ioat_softc *ioat;
	uint32_t i;

	ioat = DEVICE2SOFTC(device);

	ioat_test_detach();
	ioat_drain(ioat);

	ioat_teardown_intr(ioat);
	callout_drain(&ioat->timer);

	pci_disable_busmaster(device);

	if (ioat->pci_resource != NULL)
		bus_release_resource(device, SYS_RES_MEMORY,
		    ioat->pci_resource_id, ioat->pci_resource);

	if (ioat->ring != NULL) {
		for (i = 0; i < (1 << ioat->ring_size_order); i++)
			ioat_free_ring_entry(ioat, ioat->ring[i]);
		free(ioat->ring, M_IOAT);
	}

	if (ioat->comp_update != NULL) {
		bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
		bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
		    ioat->comp_update_map);
		bus_dma_tag_destroy(ioat->comp_update_tag);
	}

	bus_dma_tag_destroy(ioat->hw_desc_tag);

	return (0);
}

static int
ioat_teardown_intr(struct ioat_softc *ioat)
{

	if (ioat->tag != NULL)
		bus_teardown_intr(ioat->device, ioat->res, ioat->tag);

	if (ioat->res != NULL)
		bus_release_resource(ioat->device, SYS_RES_IRQ,
		    rman_get_rid(ioat->res), ioat->res);

	pci_release_msi(ioat->device);
	return (0);
}

static int
ioat3_selftest(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	int i;

	ioat_acquire(&ioat->dmaengine);
	ioat_null(&ioat->dmaengine, NULL, NULL, 0);
	ioat_release(&ioat->dmaengine);

	for (i = 0; i < 100; i++) {
		DELAY(1);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status))
			return (0);
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_log_message(0, "could not start channel: "
	    "status = %#jx error = %x\n", (uintmax_t)status, chanerr);
	return (ENXIO);
}

/*
 * Initialize Hardware
 */
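/*
 * ioat3_attach() reads the channel capabilities and transfer size limit,
 * allocates the 8-byte completion-update area, builds an initial software
 * ring of 2^IOAT_MIN_ORDER descriptors whose hardware 'next' pointers form
 * a circular chain, then resets the channel and programs CHANCTRL, CHANCMP
 * and CHAINADDR.
 */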
static int
ioat3_attach(device_t device)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor **ring;
	struct ioat_descriptor *next;
	struct ioat_dma_hw_descriptor *dma_hw_desc;
	uint32_t capabilities;
	int i, num_descriptors;
	int error;
	uint8_t xfercap;

	error = 0;
	ioat = DEVICE2SOFTC(device);
	capabilities = ioat_read_dmacapability(ioat);

	xfercap = ioat_read_xfercap(ioat);
	ioat->max_xfer_size = 1 << xfercap;

	/* TODO: need to check DCA here if we ever do XOR/PQ */

	mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
	mtx_init(&ioat->cleanup_lock, "ioat_process_events", NULL, MTX_DEF);
	callout_init(&ioat->timer, 1);

	ioat->is_resize_pending = FALSE;
	ioat->is_completion_pending = FALSE;
	ioat->is_reset_pending = FALSE;
	ioat->is_channel_running = FALSE;
	ioat->is_waiting_for_ack = FALSE;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
	    &ioat->comp_update_tag);

	error = bus_dmamem_alloc(ioat->comp_update_tag,
	    (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map);
	if (ioat->comp_update == NULL)
		return (ENOMEM);

	error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
	    ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
	    0);
	if (error != 0)
		return (error);

	ioat->ring_size_order = IOAT_MIN_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), 0x40, 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct ioat_dma_hw_descriptor), 1,
	    sizeof(struct ioat_dma_hw_descriptor), 0, NULL, NULL,
	    &ioat->hw_desc_tag);

	ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,
	    M_ZERO | M_NOWAIT);
	if (ioat->ring == NULL)
		return (ENOMEM);

	ring = ioat->ring;
	for (i = 0; i < num_descriptors; i++) {
		ring[i] = ioat_alloc_ring_entry(ioat);
		if (ring[i] == NULL)
			return (ENOMEM);

		ring[i]->id = i;
	}

	for (i = 0; i < num_descriptors - 1; i++) {
		next = ring[i + 1];
		dma_hw_desc = ring[i]->u.dma;

		dma_hw_desc->next = next->hw_desc_bus_addr;
	}

	ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr;

	ioat->head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;

	error = ioat_reset_hw(ioat);
	if (error != 0)
		return (error);

	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ring[0]->hw_desc_bus_addr);
	return (0);
}

static int
ioat_map_pci_bar(struct ioat_softc *ioat)
{

	ioat->pci_resource_id = PCIR_BAR(0);
	ioat->pci_resource = bus_alloc_resource_any(ioat->device,
	    SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE);

	if (ioat->pci_resource == NULL) {
		ioat_log_message(0, "unable to allocate pci resource\n");
		return (ENODEV);
	}

	ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
	ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);
	return (0);
}

static void
ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ioat_softc *ioat = arg;

	ioat->comp_update_bus_addr = seg[0].ds_addr;
}

static void
ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	baddr = arg;
	*baddr = segs->ds_addr;
}

/*
 * Interrupt setup and handlers
 */
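/*
 * Allocate and wire up the channel interrupt.  A single MSI-X vector is
 * preferred; if MSI-X is unavailable or hw.ioat.force_legacy_interrupts is
 * set, fall back to a shareable legacy (INTx) interrupt.
 */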
static int
ioat_setup_intr(struct ioat_softc *ioat)
{
	uint32_t num_vectors;
	int error;
	boolean_t use_msix;
	boolean_t force_legacy_interrupts;

	use_msix = FALSE;
	force_legacy_interrupts = FALSE;

	if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
		num_vectors = 1;
		pci_alloc_msix(ioat->device, &num_vectors);
		if (num_vectors == 1)
			use_msix = TRUE;
	}

	if (use_msix) {
		ioat->rid = 1;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_ACTIVE);
	} else {
		ioat->rid = 0;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_SHAREABLE | RF_ACTIVE);
	}
	if (ioat->res == NULL) {
		ioat_log_message(0, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ioat->tag = NULL;
	error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
	    INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
	if (error != 0) {
		ioat_log_message(0, "bus_setup_intr failed\n");
		return (error);
	}

	ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);
	return (0);
}

static boolean_t
ioat_model_resets_msix(struct ioat_softc *ioat)
{
	u_int32_t pciid;

	pciid = pci_get_devid(ioat->device);
	switch (pciid) {
		/* BWD: */
	case 0x0c508086:
	case 0x0c518086:
	case 0x0c528086:
	case 0x0c538086:
		/* BDXDE: */
	case 0x6f508086:
	case 0x6f518086:
	case 0x6f528086:
	case 0x6f538086:
		return (TRUE);
	}

	return (FALSE);
}

static void
ioat_interrupt_handler(void *arg)
{
	struct ioat_softc *ioat = arg;

	ioat_process_events(ioat);
}

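/*
 * Completion processing: under the cleanup lock, read the completed
 * descriptor address the hardware wrote to *comp_update, then walk the ring
 * from the tail, invoking callbacks, until the descriptor at that address
 * has been retired.  Each retired descriptor drops one
 * IOAT_ACTIVE_DESCR_REF reference.
 */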
static void
ioat_process_events(struct ioat_softc *ioat)
{
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t comp_update, status;
	uint32_t completed;

	mtx_lock(&ioat->cleanup_lock);

	completed = 0;
	comp_update = *ioat->comp_update;
	status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	ioat_log_message(3, "%s\n", __func__);

	if (status == ioat->last_seen)
		goto out;

	while (1) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		ioat_log_message(3, "completing desc %d\n", ioat->tail);

		if (dmadesc->callback_fn)
			(*dmadesc->callback_fn)(dmadesc->callback_arg);

		completed++;
		ioat->tail++;
		if (desc->hw_desc_bus_addr == status)
			break;
	}

	ioat->last_seen = desc->hw_desc_bus_addr;

	if (ioat->head == ioat->tail) {
		ioat->is_completion_pending = FALSE;
		callout_reset(&ioat->timer, IOAT_INTR_TIMO,
		    ioat_timer_callback, ioat);
	}

out:
	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	mtx_unlock(&ioat->cleanup_lock);

	ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF);
}

/*
 * User API functions
 */
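/*
 * A sketch of typical consumer usage (cf. the acquire/submit/release pattern
 * in ioat3_selftest() above); dst, src, len, cb_fn and cb_arg are
 * placeholders:
 *
 *	bus_dmaengine_t dma = ioat_get_dmaengine(0);
 *	ioat_acquire(dma);
 *	(void)ioat_copy(dma, dst, src, len, cb_fn, cb_arg, DMA_INT_EN);
 *	ioat_release(dma);
 *	...
 *	ioat_put_dmaengine(dma);
 *
 * The callback runs from ioat_process_events() once the operation completes.
 */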
bus_dmaengine_t
ioat_get_dmaengine(uint32_t index)
{

	if (index >= ioat_channel_index)
		return (NULL);
	return (&ioat_get(ioat_channel[index], IOAT_DMAENGINE_REF)->dmaengine);
}

void
ioat_put_dmaengine(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	ioat_put(ioat, IOAT_DMAENGINE_REF);
}

void
ioat_acquire(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	ioat_log_message(3, "%s\n", __func__);
}

void
ioat_release(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	ioat_log_message(3, "%s\n", __func__);
	ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET, (uint16_t)ioat->head);
	mtx_unlock(&ioat->submit_lock);
}

struct bus_dmadesc *
ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor *desc;
	struct ioat_dma_hw_descriptor *hw_desc;

	KASSERT((flags & ~DMA_ALL_FLAGS) == 0, ("Unrecognized flag(s): %#x",
		flags & ~DMA_ALL_FLAGS));

	ioat = to_ioat_softc(dmaengine);
	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (ioat_reserve_space_and_lock(ioat, 1) != 0)
		return (NULL);

	ioat_log_message(3, "%s\n", __func__);

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = desc->u.dma;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control.null = 1;
	hw_desc->u.control.completion_update = 1;

	if ((flags & DMA_INT_EN) != 0)
		hw_desc->u.control.int_enable = 1;

	hw_desc->size = 8;
	hw_desc->src_addr = 0;
	hw_desc->dest_addr = 0;

	desc->bus_dmadesc.callback_fn = callback_fn;
	desc->bus_dmadesc.callback_arg = callback_arg;

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
    bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_descriptor *desc;
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_softc *ioat;

	KASSERT((flags & ~DMA_ALL_FLAGS) == 0, ("Unrecognized flag(s): %#x",
		flags & ~DMA_ALL_FLAGS));

	ioat = to_ioat_softc(dmaengine);
	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (len > ioat->max_xfer_size) {
		ioat_log_message(0, "%s: max_xfer_size = %d, requested = %d\n",
		    __func__, ioat->max_xfer_size, (int)len);
		return (NULL);
	}

	if (ioat_reserve_space_and_lock(ioat, 1) != 0)
		return (NULL);

	ioat_log_message(3, "%s\n", __func__);

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = desc->u.dma;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control.completion_update = 1;

	if ((flags & DMA_INT_EN) != 0)
		hw_desc->u.control.int_enable = 1;

	hw_desc->size = len;
	hw_desc->src_addr = src;
	hw_desc->dest_addr = dst;

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	desc->bus_dmadesc.callback_fn = callback_fn;
	desc->bus_dmadesc.callback_arg = callback_arg;

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

/*
 * Ring Management
 */
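/*
 * The software ring holds 2^ring_size_order descriptors.  "head" is the next
 * slot to fill and "tail" is the oldest unretired slot; both are free-running
 * indices that are masked by the ring size when dereferenced.
 */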
static inline uint32_t
ioat_get_active(struct ioat_softc *ioat)
{

	return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1));
}

static inline uint32_t
ioat_get_ring_space(struct ioat_softc *ioat)
{

	return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1);
}

static struct ioat_descriptor *
ioat_alloc_ring_entry(struct ioat_softc *ioat)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	int error;

	error = ENOMEM;
	hw_desc = NULL;

	desc = malloc(sizeof(*desc), M_IOAT, M_NOWAIT);
	if (desc == NULL)
		goto out;

	bus_dmamem_alloc(ioat->hw_desc_tag, (void **)&hw_desc,
	    BUS_DMA_ZERO | BUS_DMA_NOWAIT, &ioat->hw_desc_map);
	if (hw_desc == NULL)
		goto out;

	desc->u.dma = hw_desc;

	error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
	    sizeof(*hw_desc), ioat_dmamap_cb, &desc->hw_desc_bus_addr,
	    BUS_DMA_NOWAIT);
	if (error)
		goto out;

out:
	if (error) {
		ioat_free_ring_entry(ioat, desc);
		return (NULL);
	}
	return (desc);
}

static void
ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc)
{

	if (desc == NULL)
		return;

	if (desc->u.dma)
		bus_dmamem_free(ioat->hw_desc_tag, desc->u.dma,
		    ioat->hw_desc_map);
	free(desc, M_IOAT);
}

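/*
 * Ensure there is room for num_descs new descriptors, doubling the ring
 * under the cleanup lock until either enough space exists or resize_ring()
 * refuses to grow further (IOAT_MAX_ORDER or allocation failure).
 */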
static int
ioat_reserve_space_and_lock(struct ioat_softc *ioat, int num_descs)
{
	boolean_t retry;

	while (1) {
		if (ioat_get_ring_space(ioat) >= num_descs)
			return (0);

		mtx_lock(&ioat->cleanup_lock);
		retry = resize_ring(ioat, ioat->ring_size_order + 1);
		mtx_unlock(&ioat->cleanup_lock);

		if (!retry)
			return (ENOMEM);
	}
}

static struct ioat_descriptor *
ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
{

	return (ioat->ring[index % (1 << ioat->ring_size_order)]);
}

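/*
 * Reallocate the software ring at the requested order (called with the
 * cleanup lock held).  Live descriptors are copied starting at the tail so
 * their relative order is preserved, new entries are allocated (or surplus
 * ones freed), and the hardware 'next' chain is relinked to match.
 */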
static boolean_t
resize_ring(struct ioat_softc *ioat, int order)
{
	struct ioat_descriptor **ring;
	struct ioat_descriptor *next;
	struct ioat_dma_hw_descriptor *hw;
	struct ioat_descriptor *ent;
	uint32_t current_size, active, new_size, i, new_idx, current_idx;
	uint32_t new_idx2;

	current_size = 1 << ioat->ring_size_order;
	active = (ioat->head - ioat->tail) & (current_size - 1);
	new_size = 1 << order;

	if (order > IOAT_MAX_ORDER)
		return (FALSE);

	/*
	 * when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return (FALSE);

	/* allocate the array to hold the software ring */
	ring = malloc(new_size * sizeof(*ring), M_IOAT, M_ZERO | M_NOWAIT);
	if (ring == NULL)
		return (FALSE);

	ioat_log_message(2, "ring resize: new: %d old: %d\n",
	    new_size, current_size);

	/* allocate/trim descriptors as needed */
	if (new_size > current_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < current_size; i++) {
			current_idx = (ioat->tail + i) & (current_size - 1);
			new_idx = (ioat->tail + i) & (new_size - 1);

			ring[new_idx] = ioat->ring[current_idx];
			ring[new_idx]->id = new_idx;
		}

		/* add new descriptors to the ring */
		for (i = current_size; i < new_size; i++) {
			new_idx = (ioat->tail + i) & (new_size - 1);

			ring[new_idx] = ioat_alloc_ring_entry(ioat);
			if (ring[new_idx] == NULL) {
				while (i--) {
					new_idx2 = (ioat->tail + i) &
					    (new_size - 1);

					ioat_free_ring_entry(ioat,
					    ring[new_idx2]);
				}
				free(ring, M_IOAT);
				return (FALSE);
			}
			ring[new_idx]->id = new_idx;
		}

		for (i = current_size - 1; i < new_size; i++) {
			new_idx = (ioat->tail + i) & (new_size - 1);
			next = ring[(new_idx + 1) & (new_size - 1)];
			hw = ring[new_idx]->u.dma;

			hw->next = next->hw_desc_bus_addr;
		}
	} else {
		/*
		 * copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			current_idx = (ioat->tail + i) & (current_size - 1);
			new_idx = (ioat->tail + i) & (new_size - 1);

			ring[new_idx] = ioat->ring[current_idx];
			ring[new_idx]->id = new_idx;
		}

		/* free deleted descriptors */
		for (i = new_size; i < current_size; i++) {
			ent = ioat_get_ring_entry(ioat, ioat->tail + i);
			ioat_free_ring_entry(ioat, ent);
		}

		/* fix up hardware ring */
		hw = ring[(ioat->tail + new_size - 1) & (new_size - 1)]->u.dma;
		next = ring[(ioat->tail + new_size) & (new_size - 1)];
		hw->next = next->hw_desc_bus_addr;
	}

	free(ioat->ring, M_IOAT);
	ioat->ring = ring;
	ioat->ring_size_order = order;

	return (TRUE);
}

static void
ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr)
{
	struct ioat_descriptor *desc;

	ioat_log_message(0, "Channel halted (%x)\n", chanerr);
	if (chanerr == 0)
		return;

	desc = ioat_get_ring_entry(ioat, ioat->tail + 0);
	dump_descriptor(desc->u.raw);

	desc = ioat_get_ring_entry(ioat, ioat->tail + 1);
	dump_descriptor(desc->u.raw);
}

static void
ioat_timer_callback(void *arg)
{
	struct ioat_softc *ioat;
	uint64_t status;
	uint32_t chanerr;

	ioat = arg;
	ioat_log_message(1, "%s\n", __func__);

	if (ioat->is_completion_pending) {
		status = ioat_get_chansts(ioat);

		/*
		 * When halted due to errors, check for channel programming
		 * errors before advancing the completion state.
		 */
		if (is_ioat_halted(status)) {
			chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
			ioat_halted_debug(ioat, chanerr);
		}
		ioat_process_events(ioat);
	} else {
		mtx_lock(&ioat->submit_lock);
		mtx_lock(&ioat->cleanup_lock);

		if (ioat_get_active(ioat) == 0 &&
		    ioat->ring_size_order > IOAT_MIN_ORDER)
			resize_ring(ioat, ioat->ring_size_order - 1);

		mtx_unlock(&ioat->cleanup_lock);
		mtx_unlock(&ioat->submit_lock);

		if (ioat->ring_size_order > IOAT_MIN_ORDER)
			callout_reset(&ioat->timer, IOAT_INTR_TIMO,
			    ioat_timer_callback, ioat);
	}
}

/*
 * Support Functions
 */
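/*
 * Account for one freshly filled descriptor: take an active-descriptor
 * reference, advance the head index, and arm the completion timer.  The
 * hardware is not poked here; the DMACOUNT write happens in ioat_release().
 */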
static void
ioat_submit_single(struct ioat_softc *ioat)
{

	ioat_get(ioat, IOAT_ACTIVE_DESCR_REF);
	atomic_add_rel_int(&ioat->head, 1);

	if (!ioat->is_completion_pending) {
		ioat->is_completion_pending = TRUE;
		callout_reset(&ioat->timer, IOAT_INTR_TIMO,
		    ioat_timer_callback, ioat);
	}
}

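/*
 * Full channel reset: suspend if the channel is active or idle, wait up to
 * 20 ms for it to quiesce, clear CHANERR, apply the IOAT v3 error-mask
 * workaround, then issue the reset and wait up to another 20 ms.  On models
 * that clobber MSI-X state across reset, the PCI config space is saved and
 * restored around the reset.
 */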
static int
ioat_reset_hw(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	int timeout;

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
	    timeout < 20; timeout++) {
		DELAY(1000);
		status = ioat_get_chansts(ioat);
	}
	if (timeout == 20)
		return (ETIMEDOUT);

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	/*
	 * IOAT v3 workaround - CHANERRMSK_INT with 3E07h masks out errors
	 * that can cause stability issues for IOAT v3.
	 */
	pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
	    4);
	chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
	pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);

	/*
	 * BDXDE and BWD models reset MSI-X registers on device reset.
	 * Save/restore their contents manually.
	 */
	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets MSI-X registers; saving\n");
		pci_save_state(ioat->device);
	}

	ioat_reset(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
		DELAY(1000);
	if (timeout == 20)
		return (ETIMEDOUT);

	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets registers; restored\n");
		pci_restore_state(ioat->device);
	}

	return (0);
}

static int
sysctl_handle_reset(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	int error, arg;

	ioat = arg1;

	arg = 0;
	error = SYSCTL_OUT(req, &arg, sizeof(arg));
	if (error != 0 || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &arg, sizeof(arg));
	if (error != 0)
		return (error);

	if (arg != 0)
		error = ioat_reset_hw(ioat);

	return (error);
}

static void
dump_descriptor(void *hw_desc)
{
	int i, j;

	for (i = 0; i < 2; i++) {
		for (j = 0; j < 8; j++)
			printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]);
		printf("\n");
	}
}

static void
ioat_setup_sysctl(device_t device)
{
	struct sysctl_oid_list *par;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);
	ctx = device_get_sysctl_ctx(device);
	tree = device_get_sysctl_tree(device);
	par = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "ring_size_order", CTLFLAG_RD,
	    &ioat->ring_size_order, 0, "HW descriptor ring size order");
	SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "head", CTLFLAG_RD, &ioat->head, 0,
	    "HW descriptor head pointer index");
	SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail, 0,
	    "HW descriptor tail pointer index");

	SYSCTL_ADD_PROC(ctx, par, OID_AUTO, "force_hw_reset",
	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I",
	    "Set to non-zero to reset the hardware");
}

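/*
 * Reference counting: ioat_get()/ioat_put() track outstanding users of the
 * softc (per-kind counts are kept under INVARIANTS).  ioat_putn() drops the
 * final references under IOAT_REFLK and wakes ioat_drain(), which detach
 * uses to wait for all references to disappear.
 */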
static inline struct ioat_softc *
ioat_get(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{
	uint32_t old;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	old = atomic_fetchadd_32(&ioat->refcnt, 1);
	KASSERT(old < UINT32_MAX, ("refcnt overflow"));

#ifdef INVARIANTS
	old = atomic_fetchadd_32(&ioat->refkinds[kind], 1);
	KASSERT(old < UINT32_MAX, ("refcnt kind overflow"));
#endif

	return (ioat);
}

static inline void
ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
{
	uint32_t old;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	if (n == 0)
		return;

#ifdef INVARIANTS
	old = atomic_fetchadd_32(&ioat->refkinds[kind], -n);
	KASSERT(old >= n, ("refcnt kind underflow"));
#endif

	/* Skip acquiring the lock if resulting refcnt > 0. */
	for (;;) {
		old = ioat->refcnt;
		if (old <= n)
			break;
		if (atomic_cmpset_32(&ioat->refcnt, old, old - n))
			return;
	}

	mtx_lock(IOAT_REFLK);
	old = atomic_fetchadd_32(&ioat->refcnt, -n);
	KASSERT(old >= n, ("refcnt error"));

	if (old == n)
		wakeup(IOAT_REFLK);
	mtx_unlock(IOAT_REFLK);
}

static inline void
ioat_put(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{

	ioat_putn(ioat, 1, kind);
}

static void
ioat_drain(struct ioat_softc *ioat)
{

	mtx_lock(IOAT_REFLK);
	while (ioat->refcnt > 0)
		msleep(IOAT_REFLK, IOAT_REFLK, 0, "ioat_drain", 0);
	mtx_unlock(IOAT_REFLK);
}