/* $NetBSD: bus_dma.c,v 1.69 2012/10/02 23:54:51 christos Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.69 2012/10/02 23:54:51 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#define _ALPHA_BUS_DMA_PRIVATE
#include <sys/bus.h>
#include <machine/intr.h>

#include <dev/bus_dma/bus_dmamem_common.h>

int	_bus_dmamap_load_buffer_direct(bus_dma_tag_t,
	    bus_dmamap_t, void *, bus_size_t, struct vmspace *, int,
	    paddr_t *, int *, int);

extern paddr_t avail_start, avail_end;	/* from pmap.c */

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct alpha_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct alpha_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct alpha_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	if (t->_boundary != 0 && t->_boundary < boundary)
		map->_dm_boundary = t->_boundary;
	else
		map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;
	map->_dm_window = NULL;

	*dmamp = map;
	return (0);
}

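/*
 * Example (editor's sketch, not part of the original source): a
 * typical driver sequence for creating and destroying a DMA map via
 * the MI bus_dmamap_create()/bus_dmamap_destroy() wrappers, which
 * resolve to the functions in this file.  The sizes, segment count,
 * and function name are illustrative assumptions only.
 */
#if 0	/* illustration only; not compiled */
static int
example_create_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
{
	int error;

	/*
	 * Map for one 64KB transfer in at most 8 segments of up to
	 * 8KB each, no boundary restriction.  BUS_DMA_NOWAIT makes
	 * the malloc() above use M_NOWAIT.
	 */
	error = bus_dmamap_create(dmat, 64 * 1024, 8, 8 * 1024, 0,
	    BUS_DMA_NOWAIT, mapp);
	if (error)
		return (error);

	/* ... load, sync, and unload the map as needed ... */

	bus_dmamap_destroy(dmat, *mapp);
	return (0);
}
#endif	/* example */
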
/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	free(map, M_DMAMAP);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer_direct(bus_dma_tag_t t, bus_dmamap_t map,
    void *buf, size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm->vm_map.pmap, vaddr, &curaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the current DMA window, indicate
		 * that and try to fall back into SGMAPs.
		 */
		if (t->_wsize != 0 && curaddr >= t->_wsize)
			return (EINVAL);

		curaddr |= t->_wbase;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;
		if (map->dm_maxsegsz < sgsize)
			sgsize = map->dm_maxsegsz;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}
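		/*
		 * Worked example (editor's note): with a 64KB boundary,
		 * _dm_boundary = 0x10000 and bmask = ~0xffff.  For
		 * curaddr = 0x1ff00, baddr = (0x1ff00 + 0x10000) &
		 * ~0xffff = 0x20000, so sgsize is clipped to
		 * 0x20000 - 0x1ff00 = 0x100 bytes and the segment ends
		 * exactly at the boundary.
		 */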

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if ((map->_dm_flags & DMAMAP_NO_COALESCE) == 0 &&
			    curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		/*
		 * If there is a chained window, we will automatically
		 * fall back to it.
		 */
		return (EFBIG);		/* XXX better return value here? */
	}

	return (0);
}

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load_direct(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}
	seg = 0;
	error = _bus_dmamap_load_buffer_direct(t, map, buf, buflen,
	    vm, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load(t->_next_window, map, buf, buflen,
		    p, flags);
	}
	return (error);
}

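/*
 * Example (editor's sketch): loading a kernel buffer through the MI
 * bus_dmamap_load() entry point, which for a direct-mapped window
 * lands in the function above and, on failure, falls back to a
 * chained (e.g. SGMAP) window.  Names are hypothetical.
 */
#if 0	/* illustration only; not compiled */
static int
example_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t len)
{
	int error;

	/* A NULL proc means buf is a kernel virtual address. */
	error = bus_dmamap_load(dmat, map, buf, len, NULL,
	    BUS_DMA_NOWAIT);
	if (error)
		return (error);

	/* Tell the bus layer the CPU is done writing the buffer. */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
	/* ... start the device's DMA engine ... */
	return (0);
}
#endif	/* example */
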
/*
 * Like _bus_dmamap_load_direct(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf_direct(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf_direct: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		/* XXX Could be better about coalescing. */
		/* XXX Doesn't check boundaries. */
		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
		case M_EXT|M_EXT_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
 have_addr:
			if (first == 0 &&
			    ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				break;
			}

			/*
			 * If we're beyond the current DMA window, indicate
			 * that and try to fall back into SGMAPs.
			 */
			if (t->_wsize != 0 && lastaddr >= t->_wsize) {
				error = EINVAL;
				break;
			}
			lastaddr |= t->_wbase;

			map->dm_segs[seg].ds_addr = lastaddr;
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			break;

		case 0:
			lastaddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			goto have_addr;

		default:
			error = _bus_dmamap_load_buffer_direct(t, map,
			    m->m_data, m->m_len, vmspace_kernel(), flags,
			    &lastaddr, &seg, first);
		}
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_mbuf(t->_next_window, map, m0, flags);
	}
	return (error);
}

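/*
 * Example (editor's sketch): loading an mbuf chain for transmit, the
 * usual network-driver pattern.  The EFBIG handling shown is an
 * assumption; a real driver might compact the chain and retry.
 */
#if 0	/* illustration only; not compiled */
static int
example_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0)
{
	int error;

	/* m0 must have M_PKTHDR set, as asserted above. */
	error = bus_dmamap_load_mbuf(dmat, map, m0, BUS_DMA_NOWAIT);
	if (error)
		return (error);	/* e.g. EFBIG: too many segments */

	bus_dmamap_sync(dmat, map, 0, m0->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);
	/* ... hand map->dm_segs[] to the device ... */
	return (0);
}
#endif	/* example */
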
/*
 * Like _bus_dmamap_load_direct(), but for uios.
 */
int
_bus_dmamap_load_uio_direct(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct vmspace *vm;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	vm = uio->uio_vmspace;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer_direct(t, map,
		    addr, minlen, vm, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_uio(t->_next_window, map, uio, flags);
	}
	return (error);
}

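/*
 * Example (editor's sketch): loading the buffers described by a uio,
 * e.g. from a character device's read/write routine.  No proc
 * argument is needed; uio->uio_vmspace already names the address
 * space, as used above.
 */
#if 0	/* illustration only; not compiled */
static int
example_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio)
{
	int error;

	error = bus_dmamap_load_uio(dmat, map, uio, BUS_DMA_NOWAIT);
	if (error)
		return (error);

	/* UIO_READ fills the caller's buffer, i.e. the device writes. */
	bus_dmamap_sync(dmat, map, 0, map->dm_mapsize,
	    (uio->uio_rw == UIO_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	return (0);
}
#endif	/* example */
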
/*
 * Like _bus_dmamap_load_direct(), but for raw memory.
 */
int
_bus_dmamap_load_raw_direct(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw_direct: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_window = NULL;
	map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{

	/*
	 * Flush the store buffer.
	 */
	alpha_mb();
}

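/*
 * Example (editor's sketch): the portable sync protocol drivers are
 * expected to follow.  On alpha the implementation above reduces to
 * a store-buffer barrier, but MI drivers must still issue the
 * PRE/POST operations so the same code works on ports that bounce
 * or flush caches here.
 */
#if 0	/* illustration only; not compiled */
static void
example_sync_protocol(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_size_t len)
{

	/* Host-to-device transfer: */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
	/* ... device reads the buffer ... */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);

	/* Device-to-host transfer: */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREREAD);
	/* ... device writes the buffer ... */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
}
#endif	/* example */
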
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{

	return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary,
					       segs, nsegs, rsegs, flags,
					       low, high));
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	_bus_dmamem_free_common(t, segs, nsegs);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{

	/*
	 * If we're only mapping 1 segment, use K0SEG, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		*kvap = (void *)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr);
		return (0);
	}

	return (_bus_dmamem_map_common(t, segs, nsegs, size, kvap, flags, 0));
}

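/*
 * Example (editor's sketch): the allocate-and-map lifecycle for a
 * DMA-safe buffer such as a descriptor ring.  Requesting a single
 * segment (nsegs = 1) lets the K0SEG shortcut above apply.  The
 * function name and parameters are illustrative assumptions.
 */
#if 0	/* illustration only; not compiled */
static int
example_alloc_ring(bus_dma_tag_t dmat, bus_size_t size,
    bus_dma_segment_t *seg, void **kvap)
{
	int rseg, error;

	error = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0, seg, 1,
	    &rseg, BUS_DMA_NOWAIT);
	if (error)
		return (error);

	error = bus_dmamem_map(dmat, seg, rseg, size, kvap,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		bus_dmamem_free(dmat, seg, rseg);
		return (error);
	}

	/*
	 * Teardown is the reverse: bus_dmamem_unmap(), then
	 * bus_dmamem_free().
	 */
	return (0);
}
#endif	/* example */
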
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	/*
	 * Nothing to do if we mapped it with K0SEG.
	 */
	if (kva >= (void *)ALPHA_K0SEG_BASE &&
	    kva <= (void *)ALPHA_K0SEG_END)
		return;

	_bus_dmamem_unmap_common(t, kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	bus_addr_t rv;

	rv = _bus_dmamem_mmap_common(t, segs, nsegs, off, prot, flags);
	if (rv == (bus_addr_t)-1)
		return (-1);

	return (alpha_btop((char *)rv));
}

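/*
 * Example (editor's sketch): a driver's mmap entry point can pass
 * its offset straight through; the function above returns the
 * machine-dependent cookie for the VM system, or -1 for an invalid
 * offset.  The wrapper shown is hypothetical.
 */
#if 0	/* illustration only; not compiled */
static paddr_t
example_mmap(bus_dma_tag_t dmat, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot)
{

	if (off < 0)
		return (-1);
	return (bus_dmamem_mmap(dmat, segs, nsegs, off, prot, 0));
}
#endif	/* example */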