busdma_machdep-v6.c revision 289854
/*-
 * Copyright (c) 2012-2014 Ian Lepore
 * Copyright (c) 2010 Mark Tinguely
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *  From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 289854 2015-10-23 21:29:37Z ian $");

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/kdb.h>
#include <ddb/ddb.h>
#include <ddb/db_output.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/busdma_bufalloc.h>
#include <sys/counter.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu-v6.h>
#include <machine/md_var.h>

#define	MAX_BPAGES		64
#define	MAX_DMA_SEGMENTS	4096
#define	BUS_DMA_EXCL_BOUNCE	BUS_DMA_BUS2
#define	BUS_DMA_ALIGN_BOUNCE	BUS_DMA_BUS3
#define	BUS_DMA_COULD_BOUNCE	(BUS_DMA_EXCL_BOUNCE | BUS_DMA_ALIGN_BOUNCE)
#define	BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_addr_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	struct bounce_zone	*bounce_zone;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	vm_page_t	datapage;	/* physical page of client data */
	vm_offset_t	dataoffs;	/* page offset of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct sync_list {
	vm_offset_t	vaddr;		/* kva of client data */
	vm_page_t	pages;		/* starting page of client data */
	vm_offset_t	dataoffs;	/* page offset of client data */
	bus_size_t	datacount;	/* client data count */
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static uint32_t tags_total;
static uint32_t maps_total;
static uint32_t maps_dmamem;
static uint32_t maps_coherent;
static counter_u64_t maploads_total;
static counter_u64_t maploads_bounced;
static counter_u64_t maploads_coherent;
static counter_u64_t maploads_dmamem;
static counter_u64_t maploads_mbuf;
static counter_u64_t maploads_physmem;

static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_UINT(_hw_busdma, OID_AUTO, tags_total, CTLFLAG_RD, &tags_total, 0,
   "Number of active tags");
SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_total, CTLFLAG_RD, &maps_total, 0,
   "Number of active maps");
SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_dmamem, CTLFLAG_RD, &maps_dmamem, 0,
   "Number of active maps for bus_dmamem_alloc buffers");
SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_coherent, CTLFLAG_RD, &maps_coherent, 0,
   "Number of active maps with BUS_DMA_COHERENT flag set");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_total, CTLFLAG_RD,
    &maploads_total, "Number of load operations performed");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_bounced, CTLFLAG_RD,
    &maploads_bounced, "Number of load operations that used bounce buffers");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_coherent, CTLFLAG_RD,
    &maploads_coherent, "Number of load operations on BUS_DMA_COHERENT memory");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_dmamem, CTLFLAG_RD,
    &maploads_dmamem, "Number of load operations on bus_dmamem_alloc buffers");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_mbuf, CTLFLAG_RD,
    &maploads_mbuf, "Number of load operations for mbufs");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_physmem, CTLFLAG_RD,
    &maploads_physmem, "Number of load operations on physical buffers");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
   "Total bounce pages");

struct bus_dmamap {
	struct bp_list		bpages;
	int			pagesneeded;
	int			pagesreserved;
	bus_dma_tag_t		dmat;
	struct memdesc		mem;
	bus_dmamap_callback_t	*callback;
	void			*callback_arg;
	int			flags;
#define	DMAMAP_COHERENT		(1 << 0)
#define	DMAMAP_DMAMEM_ALLOC	(1 << 1)
#define	DMAMAP_MBUF		(1 << 2)
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dma_segment_t	*segments;
	int			sync_count;
	struct sync_list	slist[];
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, pmap_t pmap,
    bus_dmamap_t map, void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags);
static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int flags);
static void dma_preread_safe(vm_offset_t va, vm_paddr_t pa, vm_size_t size);
static void dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op);

static busdma_bufalloc_t coherent_allocator;	/* Cache of coherent buffers */
static busdma_bufalloc_t standard_allocator;	/* Cache of standard buffers */
static void
busdma_init(void *dummy)
{
	int uma_flags;

	maploads_total    = counter_u64_alloc(M_WAITOK);
	maploads_bounced  = counter_u64_alloc(M_WAITOK);
	maploads_coherent = counter_u64_alloc(M_WAITOK);
	maploads_dmamem   = counter_u64_alloc(M_WAITOK);
	maploads_mbuf     = counter_u64_alloc(M_WAITOK);
	maploads_physmem  = counter_u64_alloc(M_WAITOK);

	uma_flags = 0;

	/* Create a cache of buffers in standard (cacheable) memory. */
	standard_allocator = busdma_bufalloc_create("buffer",
	    arm_dcache_align,	/* minimum_alignment */
	    NULL,		/* uma_alloc func */
	    NULL,		/* uma_free func */
	    uma_flags);		/* uma_zcreate_flags */

#ifdef INVARIANTS
	/*
	 * Force the UMA zone to allocate service structures like slabs
	 * using its own allocator.  The uma_debug code performs atomic ops
	 * on uma_slab_t fields, and the safety of those operations is not
	 * guaranteed for write-back caches.
	 */
	uma_flags = UMA_ZONE_OFFPAGE;
#endif
	/*
	 * Create a cache of buffers in uncacheable memory, to implement the
	 * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag.
	 */
	coherent_allocator = busdma_bufalloc_create("coherent",
	    arm_dcache_align,	/* minimum_alignment */
	    busdma_bufalloc_alloc_uncacheable,
	    busdma_bufalloc_free_uncacheable,
	    uma_flags);	/* uma_zcreate_flags */
}

/*
 * This init historically used SI_SUB_VM, but now the init code requires
 * malloc(9) using M_DEVBUF memory and the pcpu zones for counter(9), which get
 * set up by SI_SUB_KMEM and SI_ORDER_LAST, so we'll go right after that by
 * using SI_SUB_KMEM+1.
 */
SYSINIT(busdma, SI_SUB_KMEM+1, SI_ORDER_FIRST, busdma_init, NULL);

/*
 * This routine checks the exclusion zone constraints from a tag against the
 * physical RAM available on the machine.  If a tag specifies an exclusion zone
 * but there's no RAM in that zone, then we avoid allocating resources to bounce
 * a request, and we can use any memory allocator (as opposed to needing
 * kmem_alloc_contig() just because it can allocate pages in an address range).
 *
 * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the
 * same value on 32-bit architectures) as their lowaddr constraint, and we can't
 * possibly have RAM at an address higher than the highest address we can
 * express, so we take a fast out.
 */
static int
exclusion_bounce_check(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;

	if (lowaddr >= BUS_SPACE_MAXADDR)
		return (0);

	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr < phys_avail[i + 1]) ||
		    (lowaddr < phys_avail[i] && highaddr >= phys_avail[i]))
			return (1);
	}
	return (0);
}
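
/*
 * Illustrative example (not from the original source): on a hypothetical
 * board whose only RAM chunk is phys_avail[] = { 0x80000000, 0xa0000000 },
 * a tag with lowaddr 0x7fffffff and highaddr 0xffffffff has RAM inside its
 * exclusion zone (lowaddr < 0x80000000 and highaddr >= 0x80000000), so the
 * check above returns 1 and bounce resources may be needed.  A tag with
 * lowaddr 0xbfffffff on the same board returns 0, because no RAM lies
 * above that address.
 */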

/*
 * Return true if the tag has an exclusion zone that could lead to bouncing.
 */
static __inline int
exclusion_bounce(bus_dma_tag_t dmat)
{

	return (dmat->flags & BUS_DMA_EXCL_BOUNCE);
}

/*
 * Return true if the given address does not fall on the alignment boundary.
 */
static __inline int
alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr)
{

	return (addr & (dmat->alignment - 1));
}

/*
 * Return true if the DMA should bounce because the start or end does not fall
 * on a cacheline boundary (which would require a partial cacheline flush).
 * COHERENT memory doesn't trigger cacheline flushes.  Memory allocated by
 * bus_dmamem_alloc() is always aligned to cacheline boundaries, and there's a
 * strict rule that such memory cannot be accessed by the CPU while DMA is in
 * progress (or by multiple DMA engines at once), so that it's always safe to do
 * full cacheline flushes even if that affects memory outside the range of a
 * given DMA operation that doesn't involve the full allocated buffer.  If we're
 * mapping an mbuf, that follows the same rules as a buffer we allocated.
 */
static __inline int
cacheline_bounce(bus_dmamap_t map, bus_addr_t addr, bus_size_t size)
{

	if (map->flags & (DMAMAP_DMAMEM_ALLOC | DMAMAP_COHERENT | DMAMAP_MBUF))
		return (0);
	return ((addr | size) & arm_dcache_align_mask);
}
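
/*
 * Illustrative example (not from the original source): with 32-byte cache
 * lines (arm_dcache_align_mask == 0x1f), a plain malloc'd buffer starting
 * at 0xc1234010 with size 0x30 gives (addr | size) & 0x1f != 0, so the
 * transfer must bounce.  The same transfer through a map flagged
 * DMAMAP_COHERENT, DMAMAP_DMAMEM_ALLOC, or DMAMAP_MBUF returns 0 here and
 * is handled with cacheline maintenance instead.
 */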

/*
 * Return true if we might need to bounce the DMA described by addr and size.
 *
 * This is used to quick-check whether we need to do the more expensive work of
 * checking the DMA page-by-page looking for alignment and exclusion bounces.
 *
 * Note that the addr argument might be either virtual or physical.  It doesn't
 * matter because we only look at the low-order bits, which are the same in both
 * address spaces.
 */
static __inline int
might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr,
    bus_size_t size)
{

	return ((dmat->flags & BUS_DMA_EXCL_BOUNCE) ||
	    alignment_bounce(dmat, addr) ||
	    cacheline_bounce(map, addr, size));
}

/*
 * Return true if we must bounce the DMA described by paddr and size.
 *
 * Bouncing can be triggered by DMA that doesn't begin and end on cacheline
 * boundaries, or doesn't begin on an alignment boundary, or falls within the
 * exclusion zone of any tag in the ancestry chain.
 *
 * For exclusions, walk the chain of tags comparing paddr to the exclusion zone
 * within each tag.  If the tag has a filter function, use it to decide whether
 * the DMA needs to bounce, otherwise any DMA within the zone bounces.
 */
static int
must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
    bus_size_t size)
{

	if (cacheline_bounce(map, paddr, size))
		return (1);

	/*
	 *  The tag already contains ancestors' alignment restrictions so this
	 *  check doesn't need to be inside the loop.
	 */
	if (alignment_bounce(dmat, paddr))
		return (1);

	/*
	 * Even though each tag has an exclusion zone that is a superset of its
	 * own and all its ancestors' exclusions, the exclusion zone of each tag
	 * up the chain must be checked within the loop, because the busdma
	 * rules say the filter function is called only when the address lies
	 * within the low-highaddr range of the tag that filterfunc belongs to.
	 */
	while (dmat != NULL && exclusion_bounce(dmat)) {
		if ((paddr >= dmat->lowaddr && paddr <= dmat->highaddr) &&
		    (dmat->filter == NULL ||
		    dmat->filter(dmat->filterarg, paddr) != 0))
			return (1);
		dmat = dmat->parent;
	}

	return (0);
}

static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
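
/*
 * Typical usage (informational note, not from the original source): a driver
 * passes busdma_lock_mutex as the lockfunc argument and a pointer to its own
 * mutex (or &Giant) as lockfuncarg when calling bus_dma_tag_create(9); see
 * the illustrative example following bus_dma_tag_create() below.
 */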

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{

	panic("driver error: busdma dflt_lock called");
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

#if 0
	if (!parent)
		parent = arm_root_dma_tag;
#endif

	/* Basic sanity checking. */
	KASSERT(boundary == 0 || powerof2(boundary),
	    ("dma tag boundary %lu, must be a power of 2", boundary));
	KASSERT(boundary == 0 || boundary >= maxsegsz,
	    ("dma tag boundary %lu is < maxsegsz %lu\n", boundary, maxsegsz));
	KASSERT(alignment != 0 && powerof2(alignment),
	    ("dma tag alignment %lu, must be non-zero power of 2", alignment));
	KASSERT(maxsegsz != 0, ("dma tag maxsegsz must not be zero"));

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		newtag->alignment = MAX(parent->alignment, newtag->alignment);
		newtag->flags |= parent->flags & BUS_DMA_COULD_BOUNCE;
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit to looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (exclusion_bounce_check(newtag->lowaddr, newtag->highaddr))
		newtag->flags |= BUS_DMA_EXCL_BOUNCE;
	if (alignment_bounce(newtag, 1))
		newtag->flags |= BUS_DMA_ALIGN_BOUNCE;

	/*
	 * Any request can auto-bounce due to cacheline alignment, in addition
	 * to any alignment or boundary specifications in the tag, so if the
	 * ALLOCNOW flag is set, there's always work to do.
	 */
	if ((flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;
		/*
		 * Round size up to a full page, and add one more page because
		 * there can always be one more boundary crossing than the
		 * number of pages in a transfer.
		 */
		maxsize = roundup2(maxsize, PAGE_SIZE) + PAGE_SIZE;

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		atomic_add_32(&tags_total, 1);
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
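
/*
 * Illustrative example (hypothetical driver code, not part of this file):
 * creating a tag for a 64KB buffer of up to 16 segments, addressable by a
 * 32-bit DMA engine, with deferred callbacks serialized by busdma_lock_mutex
 * and the driver's own mutex (sc, sc_mtx, and sc_dtag are made-up names):
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev),
 *	    4, 0,			// alignment, boundary
 *	    BUS_SPACE_MAXADDR_32BIT,	// lowaddr
 *	    BUS_SPACE_MAXADDR,		// highaddr
 *	    NULL, NULL,			// filter, filterarg
 *	    65536, 16, 65536,		// maxsize, nsegments, maxsegsz
 *	    0, busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dtag);
 */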

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				atomic_subtract_32(&tags_total, 1);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

static int
allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t mapp)
{
	struct bounce_zone *bz;
	int maxpages;
	int error;

	if (dmat->bounce_zone == NULL)
		if ((error = alloc_bounce_zone(dmat)) != 0)
			return (error);
	bz = dmat->bounce_zone;
	/* Initialize the new map */
	STAILQ_INIT(&(mapp->bpages));

	/*
	 * Attempt to add pages to our pool on a per-instance basis up to a sane
	 * limit.  Even if the tag isn't flagged as COULD_BOUNCE due to
	 * alignment and boundary constraints, it could still auto-bounce due to
	 * cacheline alignment, which requires at most two bounce pages.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE)
		maxpages = MAX_BPAGES;
	else
		maxpages = 2 * bz->map_count;
	if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
	    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
		int pages;

		pages = atop(roundup2(dmat->maxsize, PAGE_SIZE)) + 1;
		pages = MIN(maxpages - bz->total_bpages, pages);
		pages = MAX(pages, 2);
		if (alloc_bounce_pages(dmat, pages) < pages)
			return (ENOMEM);

		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0)
			dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}
	bz->map_count++;
	return (0);
}

static bus_dmamap_t
allocate_map(bus_dma_tag_t dmat, int mflags)
{
	int mapsize, segsize;
	bus_dmamap_t map;

	/*
	 * Allocate the map.  The map structure ends with an embedded
	 * variable-sized array of sync_list structures.  Following that
	 * we allocate enough extra space to hold the array of bus_dma_segments.
	 */
	KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS,
	   ("cannot allocate %u dma segments (max is %u)",
	    dmat->nsegments, MAX_DMA_SEGMENTS));
	segsize = sizeof(struct bus_dma_segment) * dmat->nsegments;
	mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments;
	map = malloc(mapsize + segsize, M_DEVBUF, mflags | M_ZERO);
	if (map == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (NULL);
	}
	map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize);
	return (map);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t map;
	int error = 0;

	*mapp = map = allocate_map(dmat, M_NOWAIT);
	if (map == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	/*
	 * Bouncing might be required if the driver asks for an exclusion
	 * region, a data alignment that is stricter than 1, or DMA that begins
	 * or ends with a partial cacheline.  Whether bouncing will actually
	 * happen can't be known until mapping time, but we need to pre-allocate
	 * resources now because we might not be allowed to at mapping time.
	 */
	error = allocate_bz_and_pages(dmat, map);
	if (error != 0) {
		free(map, M_DEVBUF);
		*mapp = NULL;
		return (error);
	}
	if (map->flags & DMAMAP_COHERENT)
		atomic_add_32(&maps_coherent, 1);
	atomic_add_32(&maps_total, 1);
	dmat->map_count++;

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	if (map->flags & DMAMAP_COHERENT)
		atomic_subtract_32(&maps_coherent, 1);
	atomic_subtract_32(&maps_total, 1);
	free(map, M_DEVBUF);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
	busdma_bufalloc_t ba;
	struct busdma_bufzone *bufzone;
	bus_dmamap_t map;
	vm_memattr_t memattr;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	*mapp = map = allocate_map(dmat, mflags);
	if (map == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	map->flags = DMAMAP_DMAMEM_ALLOC;

	/* Choose a busdma buffer allocator based on memory type flags. */
	if (flags & BUS_DMA_COHERENT) {
		memattr = VM_MEMATTR_UNCACHEABLE;
		ba = coherent_allocator;
		map->flags |= DMAMAP_COHERENT;
	} else {
		memattr = VM_MEMATTR_DEFAULT;
		ba = standard_allocator;
	}

	/*
	 * Try to find a bufzone in the allocator that holds a cache of buffers
	 * of the right size for this request.  If the buffer is too big to be
	 * held in the allocator cache, this returns NULL.
	 */
	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	/*
	 * Allocate the buffer from the uma(9) allocator if...
	 *  - It's small enough to be in the allocator (bufzone not NULL).
	 *  - The alignment constraint isn't larger than the allocation size
	 *    (the allocator aligns buffers to their size boundaries).
	 *  - There's no need to handle lowaddr/highaddr exclusion zones.
	 * else allocate non-contiguous pages if...
	 *  - The page count that could get allocated doesn't exceed nsegments.
	 *  - The alignment constraint isn't larger than a page boundary.
	 *  - There are no boundary-crossing constraints.
	 * else allocate a block of contiguous pages because one or more of the
	 * constraints is something that only the contig allocator can fulfill.
	 */
	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !exclusion_bounce(dmat)) {
		*vaddr = uma_zalloc(bufzone->umazone, mflags);
	} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
	    dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
		*vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, memattr);
	} else {
		*vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
		    memattr);
	}


	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		free(map, M_DEVBUF);
		*mapp = NULL;
		return (ENOMEM);
	}
	if (map->flags & DMAMAP_COHERENT)
		atomic_add_32(&maps_coherent, 1);
	atomic_add_32(&maps_dmamem, 1);
	atomic_add_32(&maps_total, 1);
	dmat->map_count++;

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
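
/*
 * Illustrative example (hypothetical driver code, not part of this file):
 * allocating a zeroed, cache-coherent descriptor ring with the tag created
 * in the earlier example (sc_dtag, sc_ring, and sc_rmap are made-up names):
 *
 *	error = bus_dmamem_alloc(sc->sc_dtag, &sc->sc_ring,
 *	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->sc_rmap);
 */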

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	struct busdma_bufzone *bufzone;
	busdma_bufalloc_t ba;

	if (map->flags & DMAMAP_COHERENT)
		ba = coherent_allocator;
	else
		ba = standard_allocator;

	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !exclusion_bounce(dmat))
		uma_zfree(bufzone->umazone, vaddr);
	else
		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);

	dmat->map_count--;
	if (map->flags & DMAMAP_COHERENT)
		atomic_subtract_32(&maps_coherent, 1);
	atomic_subtract_32(&maps_total, 1);
	atomic_subtract_32(&maps_dmamem, 1);
	free(map, M_DEVBUF);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (map->pagesneeded == 0) {
		CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d"
		    " map= %p, pagesneeded= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment,
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (must_bounce(dmat, map, curaddr, sgsize) != 0) {
				sgsize = MIN(sgsize,
				    PAGE_SIZE - (curaddr & PAGE_MASK));
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, pmap_t pmap, bus_dmamap_t map,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d"
		    " map= %p, pagesneeded= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment,
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			if (__predict_true(pmap == kernel_pmap))
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (must_bounce(dmat, map, paddr,
			    min(vendaddr - vaddr, (PAGE_SIZE - ((vm_offset_t)vaddr &
			    PAGE_MASK)))) != 0) {
				map->pagesneeded++;
			}
			vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));

		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
	}
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			map->pagesneeded = 0;
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	if (dmat->ranges) {
		struct arm32_dma_range *dr;

		dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
		    curaddr);
		if (dr == NULL) {
			_bus_dmamap_unload(dmat, map);
			return (0);
		}
		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
	}

	/*
	 * Insert chunk into a segment, coalescing with
	 * previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
			segs[seg].ds_len += sgsize;
		else {
			if (++seg >= dmat->nsegments)
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}
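
/*
 * Illustrative example (not from the original source): with a tag whose
 * boundary is 0x1000, a chunk starting at curaddr 0x20fc0 with sgsize 0x100
 * is clipped above to 0x40 so that the segment ends exactly at the 0x21000
 * boundary; the remaining 0xc0 bytes become the next chunk on the following
 * pass through the caller's loop.
 */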

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
1038255736Sdavidch */
1039255736Sdavidchint
1040255736Sdavidch_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
1041255736Sdavidch    bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
1042255736Sdavidch{
1043255736Sdavidch	bus_addr_t curaddr;
1044255736Sdavidch	bus_addr_t sl_end = 0;
1045255736Sdavidch	bus_size_t sgsize;
1046255736Sdavidch	struct sync_list *sl;
1047255736Sdavidch	int error;
1048255736Sdavidch
1049255736Sdavidch	if (segs == NULL)
1050255736Sdavidch		segs = map->segments;
1051255736Sdavidch
1052255736Sdavidch	counter_u64_add(maploads_total, 1);
1053255736Sdavidch	counter_u64_add(maploads_physmem, 1);
1054255736Sdavidch
1055255736Sdavidch	if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
1056255736Sdavidch		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
1057255736Sdavidch		if (map->pagesneeded != 0) {
1058255736Sdavidch			counter_u64_add(maploads_bounced, 1);
1059255736Sdavidch			error = _bus_dmamap_reserve_pages(dmat, map, flags);
1060255736Sdavidch			if (error)
1061255736Sdavidch				return (error);
1062255736Sdavidch		}
1063255736Sdavidch	}
1064255736Sdavidch
1065255736Sdavidch	sl = map->slist + map->sync_count - 1;
1066255736Sdavidch
1067255736Sdavidch	while (buflen > 0) {
1068255736Sdavidch		curaddr = buf;
1069255736Sdavidch		sgsize = MIN(buflen, dmat->maxsegsz);
1070255736Sdavidch		if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr,
1071255736Sdavidch		    sgsize)) {
1072255736Sdavidch			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
1073255736Sdavidch			curaddr = add_bounce_page(dmat, map, 0, curaddr,
1074255736Sdavidch						  sgsize);
1075255736Sdavidch		} else {
1076255736Sdavidch			if (map->sync_count > 0)
1077255736Sdavidch				sl_end = VM_PAGE_TO_PHYS(sl->pages) +
1078255736Sdavidch				    sl->dataoffs + sl->datacount;
1079255736Sdavidch
1080255736Sdavidch			if (map->sync_count == 0 || curaddr != sl_end) {
1081255736Sdavidch				if (++map->sync_count > dmat->nsegments)
1082255736Sdavidch					break;
1083255736Sdavidch				sl++;
1084255736Sdavidch				sl->vaddr = 0;
1085255736Sdavidch				sl->datacount = sgsize;
1086255736Sdavidch				sl->pages = PHYS_TO_VM_PAGE(curaddr);
1087255736Sdavidch				sl->dataoffs = curaddr & PAGE_MASK;
1088255736Sdavidch			} else
1089255736Sdavidch				sl->datacount += sgsize;
1090255736Sdavidch		}
1091255736Sdavidch		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
1092255736Sdavidch		    segp);
1093255736Sdavidch		if (sgsize == 0)
1094255736Sdavidch			break;
1095255736Sdavidch		buf += sgsize;
1096255736Sdavidch		buflen -= sgsize;
1097255736Sdavidch	}
1098255736Sdavidch
1099255736Sdavidch	/*
1100255736Sdavidch	 * Did we fit?
1101255736Sdavidch	 */
1102255736Sdavidch	if (buflen != 0) {
1103255736Sdavidch		_bus_dmamap_unload(dmat, map);
1104255736Sdavidch		return (EFBIG); /* XXX better return value here? */
1105255736Sdavidch	}
1106255736Sdavidch	return (0);
1107255736Sdavidch}
1108255736Sdavidch
1109255736Sdavidchint
1110255736Sdavidch_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
1111255736Sdavidch    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
1112255736Sdavidch    bus_dma_segment_t *segs, int *segp)
1113255736Sdavidch{
1114255736Sdavidch
1115255736Sdavidch	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
1116255736Sdavidch	    segs, segp));
1117255736Sdavidch}
1118255736Sdavidch
1119255736Sdavidch/*
1120255736Sdavidch * Utility function to load a linear buffer.  segp contains
1121255736Sdavidch * the starting segment on entrance, and the ending segment on exit.
1122255736Sdavidch */
1123255736Sdavidchint
1124255736Sdavidch_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
1125255736Sdavidch    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
1126255736Sdavidch    int *segp)
1127255736Sdavidch{
1128255736Sdavidch	bus_size_t sgsize;
1129255736Sdavidch	bus_addr_t curaddr;
1130255736Sdavidch	bus_addr_t sl_pend = 0;
	vm_offset_t kvaddr, vaddr, sl_vend = 0;
	struct sync_list *sl;
	int error;

	counter_u64_add(maploads_total, 1);
	if (map->flags & DMAMAP_COHERENT)
		counter_u64_add(maploads_coherent, 1);
	if (map->flags & DMAMAP_DMAMEM_ALLOC)
		counter_u64_add(maploads_dmamem, 1);

	if (segs == NULL)
		segs = map->segments;

	if (flags & BUS_DMA_LOAD_MBUF) {
		counter_u64_add(maploads_mbuf, 1);
		map->flags |= DMAMAP_MBUF;
	}

	if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
		_bus_dmamap_count_pages(dmat, pmap, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			counter_u64_add(maploads_bounced, 1);
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	sl = map->slist + map->sync_count - 1;
	vaddr = (vm_offset_t)buf;

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (__predict_true(pmap == kernel_pmap)) {
			curaddr = pmap_kextract(vaddr);
			kvaddr = vaddr;
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			kvaddr = 0;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr,
		    sgsize)) {
			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
			    sgsize);
		} else {
			if (map->sync_count > 0) {
				sl_pend = VM_PAGE_TO_PHYS(sl->pages) +
				    sl->dataoffs + sl->datacount;
				sl_vend = sl->vaddr + sl->datacount;
			}

			if (map->sync_count == 0 ||
			    (kvaddr != 0 && kvaddr != sl_vend) ||
			    (curaddr != sl_pend)) {
				if (++map->sync_count > dmat->nsegments)
					goto cleanup;
				sl++;
				sl->vaddr = kvaddr;
				sl->datacount = sgsize;
				sl->pages = PHYS_TO_VM_PAGE(curaddr);
				sl->dataoffs = curaddr & PAGE_MASK;
			} else
				sl->datacount += sgsize;
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

cleanup:
	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}

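/*
 * Record the memory descriptor, tag, and callback in the map so that a load
 * which has to be deferred for lack of bounce pages can be retried later
 * from busdma_swi().
 */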
void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem,
    bus_dmamap_callback_t *callback, void *callback_arg)
{

	map->mem = *mem;
	map->dmat = dmat;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

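/*
 * Return the segment array to pass to the caller's callback; when the caller
 * did not supply its own array, use the segments stored in the map.
 */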
bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = map->segments;
	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;
	struct bounce_zone *bz;

	if ((bz = dmat->bounce_zone) != NULL) {
		while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
			STAILQ_REMOVE_HEAD(&map->bpages, links);
			free_bounce_page(dmat, bpage);
		}

		bz->free_bpages += map->pagesreserved;
		bz->reserved_bpages -= map->pagesreserved;
		map->pagesreserved = 0;
		map->pagesneeded = 0;
	}
	map->sync_count = 0;
	map->flags &= ~DMAMAP_MBUF;
}

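/*
 * Make a possibly cacheline-unaligned buffer safe for a DMA read: write back
 * the partial lines at each end of the region before performing the preread
 * cache maintenance on the full range.
 */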
static void
dma_preread_safe(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	/*
	 * Write back any partial cachelines immediately before and
	 * after the DMA region.  We don't need to round the address
	 * down to the nearest cacheline or specify the exact size,
	 * as dcache_wb_poc() will do the rounding for us and works
	 * at cacheline granularity.
	 */
	if (va & cpuinfo.dcache_line_mask)
		dcache_wb_poc(va, pa, 1);
	if ((va + size) & cpuinfo.dcache_line_mask)
		dcache_wb_poc(va + size, pa + size, 1);

	dcache_dma_preread(va, pa, size);
}

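/*
 * Perform the cache maintenance required by the sync op on one sync_list
 * entry, walking the buffer page by page and using a temporary quick-enter
 * mapping whenever the entry has no kernel virtual address.
 */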
static void
dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
{
	uint32_t len, offset;
	vm_page_t m;
	vm_paddr_t pa;
	vm_offset_t va, tempva;
	bus_size_t size;

	offset = sl->dataoffs;
	m = sl->pages;
	size = sl->datacount;
	pa = VM_PAGE_TO_PHYS(m) | offset;

	for ( ; size != 0; size -= len, pa += len, offset = 0, ++m) {
		tempva = 0;
		if (sl->vaddr == 0) {
			len = min(PAGE_SIZE - offset, size);
			tempva = pmap_quick_enter_page(m);
			va = tempva | offset;
		} else {
			len = sl->datacount;
			va = sl->vaddr;
		}
		KASSERT(pa == (VM_PAGE_TO_PHYS(m) | offset),
		    ("unexpected vm_page_t phys: 0x%08x != 0x%08x",
		    VM_PAGE_TO_PHYS(m) | offset, pa));

		switch (op) {
		case BUS_DMASYNC_PREWRITE:
		case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
			dcache_wb_poc(va, pa, len);
			break;
		case BUS_DMASYNC_PREREAD:
			/*
			 * An mbuf may start in the middle of a cacheline. There
			 * will be no cpu writes to the beginning of that line
			 * (which contains the mbuf header) while dma is in
			 * progress.  Handle that case by doing a writeback of
			 * just the first cacheline before invalidating the
			 * overall buffer.  Any mbuf in a chain may have this
			 * misalignment.  Buffers which are not mbufs bounce if
			 * they are not aligned to a cacheline.
			 */
			dma_preread_safe(va, pa, len);
			break;
		case BUS_DMASYNC_POSTREAD:
		case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
			dcache_inv_poc(va, pa, len);
			break;
		default:
			panic("unsupported combination of sync operations: "
			    "0x%08x\n", op);
		}

		if (tempva != 0)
			pmap_quick_remove_page(tempva);
	}
}

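/*
 * Perform the requested sync operation for the map: handle bounce pages
 * first (data copies plus cache maintenance on the bounce buffers), then
 * COHERENT memory, and finally ordinary buffers via the map's sync list.
 */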
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	struct sync_list *sl, *end;
	vm_offset_t datavaddr, tempvaddr;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;

	/*
	 * If the buffer was from user space, it is possible that this is not
	 * the same vm map, especially on a POST operation.  It's not clear that
	 * dma on userland buffers can work at all right now.  To be safe, until
	 * we're able to test direct userland dma, panic on a map mismatch.
	 */
	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		/*
		 * For PREWRITE do a writeback.  Clean the caches from the
		 * innermost to the outermost levels.
		 */
		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				tempvaddr = 0;
				datavaddr = bpage->datavaddr;
				if (datavaddr == 0) {
					tempvaddr = pmap_quick_enter_page(
					    bpage->datapage);
					datavaddr = tempvaddr | bpage->dataoffs;
				}
				bcopy((void *)datavaddr, (void *)bpage->vaddr,
				    bpage->datacount);
				if (tempvaddr != 0)
					pmap_quick_remove_page(tempvaddr);
				dcache_wb_poc(bpage->vaddr, bpage->busaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		/*
		 * Do an invalidate for PREREAD unless a writeback was already
		 * done above due to PREWRITE also being set.  The reason for a
		 * PREREAD invalidate is to prevent dirty lines currently in the
		 * cache from being evicted during the DMA.  If a writeback was
		 * done due to PREWRITE also being set there will be no dirty
		 * lines and the POSTREAD invalidate handles the rest. The
		 * invalidate is done from the innermost to outermost level. If
		 * L2 were done first, a dirty cacheline could be automatically
		 * evicted from L1 before we invalidated it, re-dirtying the L2.
		 */
		if ((op & BUS_DMASYNC_PREREAD) && !(op & BUS_DMASYNC_PREWRITE)) {
			bpage = STAILQ_FIRST(&map->bpages);
			while (bpage != NULL) {
				dcache_dma_preread(bpage->vaddr, bpage->busaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		/*
		 * Re-invalidate the caches on a POSTREAD, even though they were
		 * already invalidated at PREREAD time.  Aggressive prefetching
		 * due to accesses to other data near the dma buffer could have
		 * brought buffer data into the caches which is now stale.  The
		 * caches are invalidated from the outermost to innermost; the
		 * prefetches could be happening right now, and if L1 were
		 * invalidated first, stale L2 data could be prefetched into L1.
		 */
		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				dcache_inv_poc(bpage->vaddr, bpage->busaddr,
				    bpage->datacount);
				tempvaddr = 0;
				datavaddr = bpage->datavaddr;
				if (datavaddr == 0) {
					tempvaddr = pmap_quick_enter_page(
					    bpage->datapage);
					datavaddr = tempvaddr | bpage->dataoffs;
				}
				bcopy((void *)bpage->vaddr, (void *)datavaddr,
				    bpage->datacount);
				if (tempvaddr != 0)
					pmap_quick_remove_page(tempvaddr);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}

	/*
	 * For COHERENT memory no cache maintenance is necessary, but ensure all
	 * writes have reached memory for the PREWRITE case.  No action is
	 * needed for a PREREAD without PREWRITE also set, because that would
	 * imply that the cpu had written to the COHERENT buffer and expected
	 * the dma device to see that change, and by definition a PREWRITE sync
	 * is required to make that happen.
	 */
	if (map->flags & DMAMAP_COHERENT) {
		if (op & BUS_DMASYNC_PREWRITE) {
			dsb();
			cpu_l2cache_drain_writebuf();
		}
		return;
	}

	/*
	 * Cache maintenance for normal (non-COHERENT non-bounce) buffers.  All
	 * the comments about the sequences for flushing cache levels in the
	 * bounce buffer code above apply here as well.  In particular, the fact
	 * that the sequence is inner-to-outer for PREREAD invalidation and
	 * outer-to-inner for POSTREAD invalidation is not a mistake.
	 */
	if (map->sync_count != 0) {
		sl = &map->slist[0];
		end = &map->slist[map->sync_count];
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing sync", __func__, dmat, dmat->flags, op);

		for ( ; sl != end; ++sl)
			dma_dcache_sync(sl, op);
	}
}

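/*
 * One-time initialization of the global bounce page lists and their lock,
 * run at boot via SYSINIT.
 */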
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

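/*
 * Convenience accessors for a bounce zone's sysctl context and its top-level
 * sysctl node.
 */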
static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{

	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{

	return (bz->sysctl_tree_top);
}

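/*
 * Attach a bounce zone to the tag: reuse an existing zone whose lowaddr and
 * alignment constraints are at least as strict as the tag's, or create a new
 * one and export its statistics under the hw.busdma sysctl node.
 */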
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment) &&
		    (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests (pages bounced)");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_ULONG(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");

	return (0);
}

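/*
 * Add up to numpages bounce pages, allocated below the zone's lowaddr, to
 * the tag's bounce zone.  Returns the number of pages actually added.
 */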
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

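/*
 * Reserve bounce pages for the map from the zone's free list.  Returns the
 * number of pages still needed; when commit is zero, a request that cannot
 * be fully satisfied reserves nothing.
 */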
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

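/*
 * Take a bounce page from the zone, counting it against the map's
 * reservation, to cover the given virtual/physical address and size.
 * Returns the bus address of the bounce page for use in the segment list.
 */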
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= addr & PAGE_MASK;
		bpage->busaddr |= addr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datapage = PHYS_TO_VM_PAGE(addr);
	bpage->dataoffs = addr & PAGE_MASK;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

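/*
 * Return a bounce page to its zone's free list.  If a map is waiting for
 * pages, try to complete its reservation and, on success, queue it for the
 * deferred-load software interrupt.
 */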
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

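/*
 * Software interrupt handler for deferred map loads: retry the load of each
 * queued map, calling bus_dmamap_load_mem() with the tag's lock function
 * held, now that bounce pages have become available.
 */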
void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
		    map->callback_arg, BUS_DMA_WAITOK);
		dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}