/*-
 * Copyright (c) 2015 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <machine/bus.h>
#include <machine/bus_dma.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <dev/proto/proto.h>
#include <dev/proto/proto_dev.h>
#include <dev/proto/proto_busdma.h>

MALLOC_DEFINE(M_PROTO_BUSDMA, "proto_busdma", "DMA management data");

#define	BNDRY_MIN(a, b)		\
	(((a) == 0) ? (b) : (((b) == 0) ? (a) : MIN((a), (b))))

struct proto_callback_bundle {
	struct proto_busdma *busdma;
	struct proto_md *md;
	struct proto_ioc_busdma *ioc;
};

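/*
 * Create a DMA tag, either as a root tag or derived from a parent tag.
 * A derived tag inherits the more restrictive of its own and its parent's
 * constraints, and the effective values are written back into the ioctl
 * structure so that userland sees what it actually got.  The kernel
 * pointer to the tag doubles as the key returned in ioc->result.
 */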
static int
proto_busdma_tag_create(struct proto_busdma *busdma, struct proto_tag *parent,
    struct proto_ioc_busdma *ioc)
{
	struct proto_tag *tag;

	/* Make sure that when a boundary is specified, it's a power of 2 */
	if (ioc->u.tag.bndry != 0 &&
	    (ioc->u.tag.bndry & (ioc->u.tag.bndry - 1)) != 0)
		return (EINVAL);

	/*
	 * If nsegs is 1, ignore maxsegsz: a single segment must cover the
	 * whole allocation, so maxsegsz is effectively maxsz.  Since
	 * maxsegsz never usefully exceeds maxsz anyway, simply clamp it to
	 * maxsz in both cases.
	 */
	if (ioc->u.tag.maxsegsz > ioc->u.tag.maxsz || ioc->u.tag.nsegs == 1)
		ioc->u.tag.maxsegsz = ioc->u.tag.maxsz;

	tag = malloc(sizeof(*tag), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
	if (parent != NULL) {
		tag->parent = parent;
		LIST_INSERT_HEAD(&parent->children, tag, peers);
		tag->align = MAX(ioc->u.tag.align, parent->align);
		tag->bndry = BNDRY_MIN(ioc->u.tag.bndry, parent->bndry);
		tag->maxaddr = MIN(ioc->u.tag.maxaddr, parent->maxaddr);
		tag->maxsz = MIN(ioc->u.tag.maxsz, parent->maxsz);
		tag->maxsegsz = MIN(ioc->u.tag.maxsegsz, parent->maxsegsz);
		tag->nsegs = MIN(ioc->u.tag.nsegs, parent->nsegs);
		tag->datarate = MIN(ioc->u.tag.datarate, parent->datarate);
		/* Write constraints back */
		ioc->u.tag.align = tag->align;
		ioc->u.tag.bndry = tag->bndry;
		ioc->u.tag.maxaddr = tag->maxaddr;
		ioc->u.tag.maxsz = tag->maxsz;
		ioc->u.tag.maxsegsz = tag->maxsegsz;
		ioc->u.tag.nsegs = tag->nsegs;
		ioc->u.tag.datarate = tag->datarate;
	} else {
		tag->align = ioc->u.tag.align;
		tag->bndry = ioc->u.tag.bndry;
		tag->maxaddr = ioc->u.tag.maxaddr;
		tag->maxsz = ioc->u.tag.maxsz;
		tag->maxsegsz = ioc->u.tag.maxsegsz;
		tag->nsegs = ioc->u.tag.nsegs;
		tag->datarate = ioc->u.tag.datarate;
	}
	LIST_INSERT_HEAD(&busdma->tags, tag, tags);
	ioc->result = (uintptr_t)(void *)tag;
	return (0);
}

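/*
 * Destroy a tag.  The tag must be idle: a tag that still has memory
 * descriptors or derived (child) tags attached yields EBUSY.
 */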
static int
proto_busdma_tag_destroy(struct proto_busdma *busdma, struct proto_tag *tag)
{

	if (!LIST_EMPTY(&tag->mds))
		return (EBUSY);
	if (!LIST_EMPTY(&tag->children))
		return (EBUSY);

	if (tag->parent != NULL) {
		LIST_REMOVE(tag, peers);
		tag->parent = NULL;
	}
	LIST_REMOVE(tag, tags);
	free(tag, M_PROTO_BUSDMA);
	return (0);
}

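/*
 * Translate a user-supplied key back into a tag pointer.  The key is the
 * kernel address handed out by tag creation, but it is only trusted after
 * it has been found in this busdma context's tag list.  MD keys are
 * validated the same way by proto_busdma_md_lookup() below.
 */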
static struct proto_tag *
proto_busdma_tag_lookup(struct proto_busdma *busdma, u_long key)
{
	struct proto_tag *tag;

	LIST_FOREACH(tag, &busdma->tags, tags) {
		if ((void *)tag == (void *)key)
			return (tag);
	}
	return (NULL);
}

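/*
 * Common teardown for a memory descriptor: unload the DMA map if it is
 * loaded, then either free the kernel memory backing a MEM_ALLOC
 * descriptor or destroy the map of an MD_CREATE descriptor, and finally
 * destroy the descriptor's busdma tag.
 */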
static int
proto_busdma_md_destroy_internal(struct proto_busdma *busdma,
    struct proto_md *md)
{

	LIST_REMOVE(md, mds);
	LIST_REMOVE(md, peers);
	if (md->physaddr)
		bus_dmamap_unload(md->bd_tag, md->bd_map);
	if (md->virtaddr != NULL)
		bus_dmamem_free(md->bd_tag, md->virtaddr, md->bd_map);
	else
		bus_dmamap_destroy(md->bd_tag, md->bd_map);
	bus_dma_tag_destroy(md->bd_tag);
	free(md, M_PROTO_BUSDMA);
	return (0);
}

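/*
 * Load callback for kernel-allocated memory: report the number of bus
 * segments and the bus address of the first segment back to userland.
 */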
static void
proto_busdma_mem_alloc_callback(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct proto_callback_bundle *pcb = arg;

	pcb->ioc->u.md.bus_nsegs = nseg;
	pcb->ioc->u.md.bus_addr = segs[0].ds_addr;
}

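/*
 * Allocate DMA-able kernel memory on behalf of userland.  A dedicated
 * busdma tag is created from the constraints in the proto tag, the memory
 * is allocated and loaded, and the kernel virtual, physical and bus
 * addresses are returned through the ioctl structure.  The descriptor
 * pointer is the key returned in ioc->result.
 */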
static int
proto_busdma_mem_alloc(struct proto_busdma *busdma, struct proto_tag *tag,
    struct proto_ioc_busdma *ioc)
{
	struct proto_callback_bundle pcb;
	struct proto_md *md;
	int error;

	md = malloc(sizeof(*md), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
	md->tag = tag;

	error = bus_dma_tag_create(busdma->bd_roottag, tag->align, tag->bndry,
	    tag->maxaddr, BUS_SPACE_MAXADDR, NULL, NULL, tag->maxsz,
	    tag->nsegs, tag->maxsegsz, 0, NULL, NULL, &md->bd_tag);
	if (error) {
		free(md, M_PROTO_BUSDMA);
		return (error);
	}
	error = bus_dmamem_alloc(md->bd_tag, &md->virtaddr, 0, &md->bd_map);
	if (error) {
		bus_dma_tag_destroy(md->bd_tag);
		free(md, M_PROTO_BUSDMA);
		return (error);
	}
	md->physaddr = pmap_kextract((uintptr_t)(md->virtaddr));
	pcb.busdma = busdma;
	pcb.md = md;
	pcb.ioc = ioc;
	error = bus_dmamap_load(md->bd_tag, md->bd_map, md->virtaddr,
	    tag->maxsz, proto_busdma_mem_alloc_callback, &pcb, BUS_DMA_NOWAIT);
	if (error) {
		bus_dmamem_free(md->bd_tag, md->virtaddr, md->bd_map);
		bus_dma_tag_destroy(md->bd_tag);
		free(md, M_PROTO_BUSDMA);
		return (error);
	}
	LIST_INSERT_HEAD(&tag->mds, md, peers);
	LIST_INSERT_HEAD(&busdma->mds, md, mds);
	ioc->u.md.virt_addr = (uintptr_t)md->virtaddr;
	ioc->u.md.virt_size = tag->maxsz;
	ioc->u.md.phys_nsegs = 1;
	ioc->u.md.phys_addr = md->physaddr;
	ioc->result = (uintptr_t)(void *)md;
	return (0);
}

static int
proto_busdma_mem_free(struct proto_busdma *busdma, struct proto_md *md)
{

	if (md->virtaddr == NULL)
		return (ENXIO);
	return (proto_busdma_md_destroy_internal(busdma, md));
}

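/*
 * Create an empty memory descriptor for a user-owned buffer.  Only the
 * busdma tag and DMA map are set up here; the buffer itself is supplied
 * and loaded later by the MD_LOAD request.
 */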
static int
proto_busdma_md_create(struct proto_busdma *busdma, struct proto_tag *tag,
    struct proto_ioc_busdma *ioc)
{
	struct proto_md *md;
	int error;

	md = malloc(sizeof(*md), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
	md->tag = tag;

	error = bus_dma_tag_create(busdma->bd_roottag, tag->align, tag->bndry,
	    tag->maxaddr, BUS_SPACE_MAXADDR, NULL, NULL, tag->maxsz,
	    tag->nsegs, tag->maxsegsz, 0, NULL, NULL, &md->bd_tag);
	if (error) {
		free(md, M_PROTO_BUSDMA);
		return (error);
	}
	error = bus_dmamap_create(md->bd_tag, 0, &md->bd_map);
	if (error) {
		bus_dma_tag_destroy(md->bd_tag);
		free(md, M_PROTO_BUSDMA);
		return (error);
	}

	LIST_INSERT_HEAD(&tag->mds, md, peers);
	LIST_INSERT_HEAD(&busdma->mds, md, mds);
	ioc->result = (uintptr_t)(void *)md;
	return (0);
}

static int
proto_busdma_md_destroy(struct proto_busdma *busdma, struct proto_md *md)
{

	if (md->virtaddr != NULL)
		return (ENXIO);
	return (proto_busdma_md_destroy_internal(busdma, md));
}

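/*
 * Load callback for user buffers: as with the MEM_ALLOC callback, only
 * the segment count and the first segment's bus address are reported.
 */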
static void
proto_busdma_md_load_callback(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t sz, int error)
{
	struct proto_callback_bundle *pcb = arg;

	pcb->ioc->u.md.bus_nsegs = nseg;
	pcb->ioc->u.md.bus_addr = segs[0].ds_addr;
}

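/*
 * Load a userland buffer into a previously created memory descriptor.
 * The buffer is described by a single-entry uio marked UIO_USERSPACE so
 * that bus_dmamap_load_uio() resolves the addresses through the calling
 * thread's pmap.  Only the physical address of the first page is
 * reported back (see the XXX below).
 */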
static int
proto_busdma_md_load(struct proto_busdma *busdma, struct proto_md *md,
    struct proto_ioc_busdma *ioc, struct thread *td)
{
	struct proto_callback_bundle pcb;
	struct iovec iov;
	struct uio uio;
	pmap_t pmap;
	int error;

	iov.iov_base = (void *)(uintptr_t)ioc->u.md.virt_addr;
	iov.iov_len = ioc->u.md.virt_size;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = iov.iov_len;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	pcb.busdma = busdma;
	pcb.md = md;
	pcb.ioc = ioc;
	error = bus_dmamap_load_uio(md->bd_tag, md->bd_map, &uio,
	    proto_busdma_md_load_callback, &pcb, BUS_DMA_NOWAIT);
	if (error)
		return (error);

	/* XXX determine *all* physical memory segments */
	pmap = vmspace_pmap(td->td_proc->p_vmspace);
	md->physaddr = pmap_extract(pmap, ioc->u.md.virt_addr);
	ioc->u.md.phys_nsegs = 1;	/* XXX */
	ioc->u.md.phys_addr = md->physaddr;
	return (0);
}

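/*
 * Unload a loaded memory descriptor; ENXIO if the map isn't loaded.
 */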
static int
proto_busdma_md_unload(struct proto_busdma *busdma, struct proto_md *md)
{

	if (!md->physaddr)
		return (ENXIO);
	bus_dmamap_unload(md->bd_tag, md->bd_map);
	md->physaddr = 0;
	return (0);
}

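/*
 * Synchronize a loaded DMA map.  The operation mask from userland must
 * only contain the PRE/POST READ/WRITE sync flags, and the descriptor
 * must be loaded.
 */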
static int
proto_busdma_sync(struct proto_busdma *busdma, struct proto_md *md,
    struct proto_ioc_busdma *ioc)
{
	u_int ops;

	ops = BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE;
	if (ioc->u.sync.op & ~ops)
		return (EINVAL);
	if (!md->physaddr)
		return (ENXIO);
	bus_dmamap_sync(md->bd_tag, md->bd_map, ioc->u.sync.op);
	return (0);
}

static struct proto_md *
proto_busdma_md_lookup(struct proto_busdma *busdma, u_long key)
{
	struct proto_md *md;

	LIST_FOREACH(md, &busdma->mds, mds) {
		if ((void *)md == (void *)key)
			return (md);
	}
	return (NULL);
}

struct proto_busdma *
proto_busdma_attach(struct proto_softc *sc)
{
	struct proto_busdma *busdma;

	busdma = malloc(sizeof(*busdma), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
	return (busdma);
}

int
proto_busdma_detach(struct proto_softc *sc, struct proto_busdma *busdma)
{

	proto_busdma_cleanup(sc, busdma);
	free(busdma, M_PROTO_BUSDMA);
	return (0);
}

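/*
 * Release all memory descriptors and tags still outstanding in this
 * busdma context.  Descriptors are destroyed first; otherwise
 * proto_busdma_tag_destroy() would return EBUSY for tags that still
 * have descriptors attached.
 */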
int
proto_busdma_cleanup(struct proto_softc *sc, struct proto_busdma *busdma)
{
	struct proto_md *md, *md1;
	struct proto_tag *tag, *tag1;

	LIST_FOREACH_SAFE(md, &busdma->mds, mds, md1)
		proto_busdma_md_destroy_internal(busdma, md);
	LIST_FOREACH_SAFE(tag, &busdma->tags, tags, tag1)
		proto_busdma_tag_destroy(busdma, tag);
	return (0);
}

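/*
 * Ioctl dispatcher for the busdma interface of proto(4).  Every request
 * except TAG_CREATE carries either a tag key or an MD key, which is
 * validated against this context's lists before the operation runs, so a
 * stale or foreign handle simply yields EINVAL.
 *
 * A minimal sketch of the expected userland sequence, assuming the ioctl
 * command is PROTO_IOC_BUSDMA as declared in proto_dev.h (variable names
 * below are illustrative only):
 *
 *	struct proto_ioc_busdma ioc = { 0 };
 *	ioc.request = PROTO_IOC_BUSDMA_TAG_CREATE;
 *	// fill in ioc.u.tag.{align,bndry,maxaddr,maxsz,maxsegsz,nsegs,...}
 *	ioctl(fd, PROTO_IOC_BUSDMA, &ioc);	// ioc.result is the tag key
 *
 *	ioc.request = PROTO_IOC_BUSDMA_MEM_ALLOC;
 *	ioc.u.md.tag = tag_key;
 *	ioctl(fd, PROTO_IOC_BUSDMA, &ioc);	// ioc.u.md.* describe the memory
 */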
int
proto_busdma_ioctl(struct proto_softc *sc, struct proto_busdma *busdma,
    struct proto_ioc_busdma *ioc, struct thread *td)
{
	struct proto_tag *tag;
	struct proto_md *md;
	int error;

	error = 0;
	switch (ioc->request) {
	case PROTO_IOC_BUSDMA_TAG_CREATE:
		busdma->bd_roottag = bus_get_dma_tag(sc->sc_dev);
		error = proto_busdma_tag_create(busdma, NULL, ioc);
		break;
	case PROTO_IOC_BUSDMA_TAG_DERIVE:
		tag = proto_busdma_tag_lookup(busdma, ioc->key);
		if (tag == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_tag_create(busdma, tag, ioc);
		break;
	case PROTO_IOC_BUSDMA_TAG_DESTROY:
		tag = proto_busdma_tag_lookup(busdma, ioc->key);
		if (tag == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_tag_destroy(busdma, tag);
		break;
	case PROTO_IOC_BUSDMA_MEM_ALLOC:
		tag = proto_busdma_tag_lookup(busdma, ioc->u.md.tag);
		if (tag == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_mem_alloc(busdma, tag, ioc);
		break;
	case PROTO_IOC_BUSDMA_MEM_FREE:
		md = proto_busdma_md_lookup(busdma, ioc->key);
		if (md == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_mem_free(busdma, md);
		break;
	case PROTO_IOC_BUSDMA_MD_CREATE:
		tag = proto_busdma_tag_lookup(busdma, ioc->u.md.tag);
		if (tag == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_md_create(busdma, tag, ioc);
		break;
	case PROTO_IOC_BUSDMA_MD_DESTROY:
		md = proto_busdma_md_lookup(busdma, ioc->key);
		if (md == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_md_destroy(busdma, md);
		break;
	case PROTO_IOC_BUSDMA_MD_LOAD:
		md = proto_busdma_md_lookup(busdma, ioc->key);
		if (md == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_md_load(busdma, md, ioc, td);
		break;
	case PROTO_IOC_BUSDMA_MD_UNLOAD:
		md = proto_busdma_md_lookup(busdma, ioc->key);
		if (md == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_md_unload(busdma, md);
		break;
	case PROTO_IOC_BUSDMA_SYNC:
		md = proto_busdma_md_lookup(busdma, ioc->key);
		if (md == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_sync(busdma, md, ioc);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

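/*
 * Called on the driver's mmap path: a physical address may be mapped
 * only if it falls within one of the memory descriptors managed here.
 * The range check is page-granular on both ends.
 */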
int
proto_busdma_mmap_allowed(struct proto_busdma *busdma, vm_paddr_t physaddr)
{
	struct proto_md *md;

	LIST_FOREACH(md, &busdma->mds, mds) {
		if (physaddr >= trunc_page(md->physaddr) &&
		    physaddr <= trunc_page(md->physaddr + md->tag->maxsz))
			return (1);
	}
	return (0);
}