/*-
 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ata.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/sema.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <dev/ata/ata-all.h>

/* prototypes */
static void ata_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
static void ata_dmaalloc(device_t dev);
static void ata_dmafree(device_t dev);
static void ata_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
static int ata_dmaload(struct ata_request *request, void *addr, int *nsegs);
static int ata_dmaunload(struct ata_request *request);

/* local vars */
static MALLOC_DEFINE(M_ATADMA, "ata_dma", "ATA driver DMA");

/* misc defines */
#define MAXTABSZ	PAGE_SIZE		/* max size of a slot's SG/PRD table */
#define MAXWSPCSZ	(PAGE_SIZE * 2)		/* size of the per-channel work area */

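/* Callback argument used to hand the mapped bus address back from bus_dmamap_load(). */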
struct ata_dc_cb_args {
    bus_addr_t maddr;
    int error;
};

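/*
 * Install default DMA methods and limits on the channel (unless the
 * controller driver has already overridden them), create the channel's
 * parent and work-area DMA tags, and allocate and map the shared work
 * area, recording its bus address in ch->dma.work_bus.
 */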
void
ata_dmainit(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_dc_cb_args dcba;

    if (ch->dma.alloc == NULL)
	ch->dma.alloc = ata_dmaalloc;
    if (ch->dma.free == NULL)
	ch->dma.free = ata_dmafree;
    if (ch->dma.setprd == NULL)
	ch->dma.setprd = ata_dmasetprd;
    if (ch->dma.load == NULL)
	ch->dma.load = ata_dmaload;
    if (ch->dma.unload == NULL)
	ch->dma.unload = ata_dmaunload;
    if (ch->dma.alignment == 0)
	ch->dma.alignment = 2;
    if (ch->dma.boundary == 0)
	ch->dma.boundary = 65536;
    if (ch->dma.segsize == 0)
	ch->dma.segsize = 65536;
    if (ch->dma.max_iosize == 0)
	ch->dma.max_iosize = MIN((ATA_DMA_ENTRIES - 1) * PAGE_SIZE, MAXPHYS);
    if (ch->dma.max_address == 0)
	ch->dma.max_address = BUS_SPACE_MAXADDR_32BIT;
    if (ch->dma.dma_slots == 0)
	ch->dma.dma_slots = 1;

    if (bus_dma_tag_create(bus_get_dma_tag(dev), ch->dma.alignment, 0,
			   ch->dma.max_address, BUS_SPACE_MAXADDR,
			   NULL, NULL, ch->dma.max_iosize,
			   ATA_DMA_ENTRIES, ch->dma.segsize,
			   0, NULL, NULL, &ch->dma.dmatag))
	goto error;

    if (bus_dma_tag_create(ch->dma.dmatag, PAGE_SIZE, 64 * 1024,
			   ch->dma.max_address, BUS_SPACE_MAXADDR,
			   NULL, NULL, MAXWSPCSZ, 1, MAXWSPCSZ,
			   0, NULL, NULL, &ch->dma.work_tag))
	goto error;

    if (bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work,
			 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
			 &ch->dma.work_map))
	goto error;

    if (bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work,
			MAXWSPCSZ, ata_dmasetupc_cb, &dcba, 0) ||
			dcba.error) {
	bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
	goto error;
    }
    ch->dma.work_bus = dcba.maddr;
    return;

error:
    device_printf(dev, "WARNING - DMA initialization failed, disabling DMA\n");
    ata_dmafini(dev);
}

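/*
 * Undo ata_dmainit(): unload and free the work area and destroy the
 * work-area and parent DMA tags.
 */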
void
ata_dmafini(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    if (ch->dma.work_bus) {
	bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map);
	bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
	ch->dma.work_bus = 0;
	ch->dma.work_map = NULL;
	ch->dma.work = NULL;
    }
    if (ch->dma.work_tag) {
	bus_dma_tag_destroy(ch->dma.work_tag);
	ch->dma.work_tag = NULL;
    }
    if (ch->dma.dmatag) {
	bus_dma_tag_destroy(ch->dma.dmatag);
	ch->dma.dmatag = NULL;
    }
}

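/*
 * bus_dmamap_load() callback: record the error status and, on success,
 * the bus address of the single mapped segment in the caller's
 * ata_dc_cb_args.
 */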
static void
ata_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
    struct ata_dc_cb_args *dcba = (struct ata_dc_cb_args *)xsc;

    if (!(dcba->error = error))
	dcba->maddr = segs[0].ds_addr;
}

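/*
 * Allocate the per-slot DMA resources for the channel: for each slot a
 * tag, memory and map for its SG (PRD) table plus a tag and map for the
 * data transfers themselves.  On any failure everything is released
 * again and DMA is disabled.
 */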
static void
ata_dmaalloc(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_dc_cb_args dcba;
    int i;

    /* alloc and setup needed dma slots */
    bzero(ch->dma.slot, sizeof(struct ata_dmaslot) * ATA_DMA_SLOTS);
    for (i = 0; i < ch->dma.dma_slots; i++) {
	struct ata_dmaslot *slot = &ch->dma.slot[i];

	if (bus_dma_tag_create(ch->dma.dmatag, PAGE_SIZE, PAGE_SIZE,
			       ch->dma.max_address, BUS_SPACE_MAXADDR,
			       NULL, NULL, PAGE_SIZE, 1, PAGE_SIZE,
			       0, NULL, NULL, &slot->sg_tag)) {
	    device_printf(ch->dev, "FAILURE - create sg_tag\n");
	    goto error;
	}

	if (bus_dmamem_alloc(slot->sg_tag, (void **)&slot->sg, BUS_DMA_WAITOK,
			     &slot->sg_map)) {
	    device_printf(ch->dev, "FAILURE - alloc sg_map\n");
	    goto error;
	}

	if (bus_dmamap_load(slot->sg_tag, slot->sg_map, slot->sg, MAXTABSZ,
			    ata_dmasetupc_cb, &dcba, 0) || dcba.error) {
	    device_printf(ch->dev, "FAILURE - load sg\n");
	    goto error;
	}
	slot->sg_bus = dcba.maddr;

	if (bus_dma_tag_create(ch->dma.dmatag,
			       ch->dma.alignment, ch->dma.boundary,
			       ch->dma.max_address, BUS_SPACE_MAXADDR,
			       NULL, NULL, ch->dma.max_iosize,
			       ATA_DMA_ENTRIES, ch->dma.segsize,
			       BUS_DMA_ALLOCNOW, NULL, NULL, &slot->data_tag)) {
	    device_printf(ch->dev, "FAILURE - create data_tag\n");
	    goto error;
	}

	if (bus_dmamap_create(slot->data_tag, 0, &slot->data_map)) {
	    device_printf(ch->dev, "FAILURE - create data_map\n");
	    goto error;
	}
    }

    return;

error:
    device_printf(dev, "WARNING - DMA allocation failed, disabling DMA\n");
    ata_dmafree(dev);
}

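/*
 * Release all per-slot DMA resources set up by ata_dmaalloc().
 */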
static void
ata_dmafree(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    int i;

    /* free all dma slots */
    for (i = 0; i < ATA_DMA_SLOTS; i++) {
	struct ata_dmaslot *slot = &ch->dma.slot[i];

	if (slot->sg_bus) {
	    bus_dmamap_unload(slot->sg_tag, slot->sg_map);
	    slot->sg_bus = 0;
	}
	if (slot->sg_map) {
	    /* memory and map come from bus_dmamem_alloc(), so free them together */
	    bus_dmamem_free(slot->sg_tag, slot->sg, slot->sg_map);
	    slot->sg = NULL;
	    slot->sg_map = NULL;
	}
	if (slot->data_map) {
	    bus_dmamap_destroy(slot->data_tag, slot->data_map);
	    slot->data_map = NULL;
	}
	if (slot->sg_tag) {
	    bus_dma_tag_destroy(slot->sg_tag);
	    slot->sg_tag = NULL;
	}
	if (slot->data_tag) {
	    bus_dma_tag_destroy(slot->data_tag);
	    slot->data_tag = NULL;
	}
    }
}

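/*
 * bus_dmamap_load() callback that turns the segment list into a busmaster
 * PRD table, marking the final entry with ATA_DMA_EOT.
 */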
static void
ata_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
    struct ata_dmasetprd_args *args = xsc;
    struct ata_dma_prdentry *prd = args->dmatab;
    int i;

    if ((args->error = error))
	return;

    for (i = 0; i < nsegs; i++) {
	prd[i].addr = htole32(segs[i].ds_addr);
	prd[i].count = htole32(segs[i].ds_len);
    }
    prd[i - 1].count |= htole32(ATA_DMA_EOT);
    KASSERT(nsegs <= ATA_DMA_ENTRIES, ("too many DMA segment entries\n"));
    args->nsegs = nsegs;
}

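/*
 * Prepare a request for DMA: sanity check the transfer size and alignment,
 * attach a DMA slot to the request, load the data buffer (or CCB) and build
 * the PRD table through ch->dma.setprd, then sync the SG and data maps for
 * the upcoming transfer.  Returns 0 on success or EIO on failure.
 */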
static int
ata_dmaload(struct ata_request *request, void *addr, int *entries)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    struct ata_dmasetprd_args dspa;
    int error;

    ATA_DEBUG_RQ(request, "dmaload");

    if (request->dma) {
	device_printf(request->parent,
		      "FAILURE - already active DMA on this device\n");
	return EIO;
    }
    if (!request->bytecount) {
	device_printf(request->parent,
		      "FAILURE - zero length DMA transfer attempted\n");
	return EIO;
    }
    if (request->bytecount & (ch->dma.alignment - 1)) {
	device_printf(request->parent,
		      "FAILURE - odd-sized DMA transfer attempt %d %% %d\n",
		      request->bytecount, ch->dma.alignment);
	return EIO;
    }
    if (request->bytecount > ch->dma.max_iosize) {
	device_printf(request->parent,
		      "FAILURE - oversized DMA transfer attempt %d > %d\n",
		      request->bytecount, ch->dma.max_iosize);
	return EIO;
    }

    /* set our slot. XXX SOS NCQ will change that */
    request->dma = &ch->dma.slot[0];

    if (addr)
	dspa.dmatab = addr;
    else
	dspa.dmatab = request->dma->sg;

    if (request->flags & ATA_R_DATA_IN_CCB)
	error = bus_dmamap_load_ccb(request->dma->data_tag,
				    request->dma->data_map, request->ccb,
				    ch->dma.setprd, &dspa, BUS_DMA_NOWAIT);
    else
	error = bus_dmamap_load(request->dma->data_tag, request->dma->data_map,
				request->data, request->bytecount,
				ch->dma.setprd, &dspa, BUS_DMA_NOWAIT);
    if (error || (error = dspa.error)) {
	device_printf(request->parent, "FAILURE - load data\n");
	goto error;
    }

    if (entries)
	*entries = dspa.nsegs;

    bus_dmamap_sync(request->dma->sg_tag, request->dma->sg_map,
		    BUS_DMASYNC_PREWRITE);
    bus_dmamap_sync(request->dma->data_tag, request->dma->data_map,
		    (request->flags & ATA_R_READ) ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
    return 0;

error:
    ata_dmaunload(request);
    return EIO;
}

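/*
 * Finish a DMA transfer: sync the SG and data maps, unload the data map
 * and detach the DMA slot from the request.
 */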
static int
ata_dmaunload(struct ata_request *request)
{
    ATA_DEBUG_RQ(request, "dmaunload");

    if (request->dma) {
	bus_dmamap_sync(request->dma->sg_tag, request->dma->sg_map,
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(request->dma->data_tag, request->dma->data_map,
			(request->flags & ATA_R_READ) ?
			BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(request->dma->data_tag, request->dma->data_map);
	request->dma = NULL;
    }
    return 0;
}