/* $NetBSD: isadma_bounce.c,v 1.19 2022/01/22 15:10:30 skrll Exp $ */
/* NetBSD: isadma_bounce.c,v 1.2 2000/06/01 05:49:36 thorpej Exp  */

/*-
 * Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: isadma_bounce.c,v 1.19 2022/01/22 15:10:30 skrll Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _ARC_BUS_DMA_PRIVATE
#include <sys/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>
/*
 * Cookie used by bouncing ISA DMA.  A pointer to one of these is stashed
 * in the DMA map.
 */
struct isadma_bounce_cookie {
	int	id_flags;		/* flags; see below */

	/*
	 * Information about the original buffer used during
	 * DMA map syncs.  Note that origbuflen is only used
	 * for ID_BUFTYPE_LINEAR.
	 */
	void	*id_origbuf;		/* pointer to orig buffer if
					   bouncing */
	bus_size_t id_origbuflen;	/* ...and size */
	int	id_buftype;		/* type of buffer */

	void	*id_bouncebuf;		/* pointer to the bounce buffer */
	bus_size_t id_bouncebuflen;	/* ...and size */
	int	id_nbouncesegs;		/* number of valid bounce segs */
	bus_dma_segment_t id_bouncesegs[1]; /* array of bounce buffer
					       physical memory segments */
};

/* id_flags */
#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */

/* id_buftype */
#define	ID_BUFTYPE_INVALID	0
#define	ID_BUFTYPE_LINEAR	1
#define	ID_BUFTYPE_MBUF		2
#define	ID_BUFTYPE_UIO		3
#define	ID_BUFTYPE_RAW		4
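
/*
 * id_buftype records which load routine set up the current bounce
 * mapping.  It is set by the dmamap_load variants below and reset to
 * ID_BUFTYPE_INVALID at unload time, so a sync issued against a stale
 * or never-loaded map panics instead of silently corrupting data.
 */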

int	isadma_bounce_dmamap_create(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
void	isadma_bounce_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	isadma_bounce_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	isadma_bounce_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	isadma_bounce_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	isadma_bounce_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	isadma_bounce_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	isadma_bounce_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int);
int	isadma_bounce_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);

static int isadma_bounce_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
static void isadma_bounce_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Returns true if the system memory configuration exceeds the
 * capabilities of ISA DMA.
 */
static bool
isadma_bounce_check_range(bus_dma_tag_t const t)
{
	return pmap_limits.avail_end > ISA_DMA_BOUNCE_THRESHOLD;
}

static int
isadma_bounce_cookieflags(bus_dma_tag_t const t, bus_dmamap_t const map)
{
	int cookieflags = 0;

	/*
	 * ISA only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There is also the opposite case: the most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	if (isadma_bounce_check_range(t) ||
	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
	}
	return cookieflags;
}
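
/*
 * Worked example of the segment check above (assuming a typical 4KB
 * PAGE_SIZE; the actual value is machine-dependent): a 64KB transfer
 * can span up to (65536 / 4096) + 1 = 17 pages when the buffer is not
 * page-aligned, so a map created with _dm_segcnt < 17 might need to
 * bounce even on a machine whose RAM all sits below the 16M line.
 */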

static size_t
isadma_bounce_cookiesize(bus_dmamap_t const map, int cookieflags)
{
	size_t cookiesize = sizeof(struct isadma_bounce_cookie);

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		cookiesize += (sizeof(bus_dma_segment_t) *
		    (map->_dm_segcnt - 1));
	}
	return cookiesize;
}
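
/*
 * Note that struct isadma_bounce_cookie already embeds one segment
 * (id_bouncesegs[1]), which is why only (_dm_segcnt - 1) additional
 * segments need to be appended when the map might bounce.
 */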

static int
isadma_bounce_cookie_alloc(bus_dma_tag_t const t, bus_dmamap_t const map,
    int const flags)
{
	struct isadma_bounce_cookie *cookie;
	int cookieflags = isadma_bounce_cookieflags(t, map);

	if ((cookie = kmem_zalloc(isadma_bounce_cookiesize(map, cookieflags),
	     (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL) {
		return ENOMEM;
	}

	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	return 0;
}

static void
isadma_bounce_cookie_free(bus_dmamap_t const map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	if (cookie != NULL) {
		kmem_free(map->_dm_cookie,
		    isadma_bounce_cookiesize(map, cookie->id_flags));
		map->_dm_cookie = NULL;
	}
}

void
isadma_bounce_tag_init(bus_dma_tag_t t)
{
	/*
	 * Initialize the DMA tag used for ISA DMA.
	 */

	_bus_dma_tag_init(t);

	t->_dmamap_create = isadma_bounce_dmamap_create;
	t->_dmamap_destroy = isadma_bounce_dmamap_destroy;
	t->_dmamap_load = isadma_bounce_dmamap_load;
	t->_dmamap_load_mbuf = isadma_bounce_dmamap_load_mbuf;
	t->_dmamap_load_uio = isadma_bounce_dmamap_load_uio;
	t->_dmamap_load_raw = isadma_bounce_dmamap_load_raw;
	t->_dmamap_unload = isadma_bounce_dmamap_unload;
	t->_dmamap_sync = isadma_bounce_dmamap_sync;
	t->_dmamem_alloc = isadma_bounce_dmamem_alloc;
}
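
/*
 * A minimal usage sketch (hypothetical platform attach code; the tag
 * variable is illustrative, not part of this file):
 *
 *	isadma_bounce_tag_init(&isa_dmat);
 *
 * After this, bus_dmamap_create(), the bus_dmamap_load*() variants,
 * bus_dmamap_sync() and bus_dmamem_alloc() on that tag dispatch to
 * the isadma_bounce_* entry points installed above.
 */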

/*
 * Create an ISA DMA map.
 */
int
isadma_bounce_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct isadma_bounce_cookie *cookie;
	bus_dmamap_t map;
	int error;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return error;

	map = *dmamp;
	map->_dm_cookie = NULL;

	/*
	 * Allocate our cookie.
	 */
	if ((error = isadma_bounce_cookie_alloc(t, map, flags)) != 0) {
		goto out;
	}
	cookie = map->_dm_cookie;

	if (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if (flags & BUS_DMA_ALLOCNOW) {
			error = isadma_bounce_alloc_bouncebuf(t, map, size,
			    flags);
		}
	}

 out:
	if (error) {
		isadma_bounce_cookie_free(map);
		_bus_dmamap_destroy(t, map);
	}
	return error;
}

/*
 * Destroy an ISA DMA map.
 */
void
isadma_bounce_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		isadma_bounce_free_bouncebuf(t, map);

	isadma_bounce_cookie_free(map);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
isadma_bounce_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 || (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0)
		return error;

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return error;
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return error;
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return 0;
}
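
/*
 * Note that a successful bounced load only maps the bounce pages; the
 * data copies between the caller's buffer and the bounce buffer happen
 * in isadma_bounce_dmamap_sync(), so each transfer must be bracketed
 * with the appropriate PRE/POST sync operations.
 */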

/*
 * Like isadma_bounce_dmamap_load(), but for mbufs.
 */
int
isadma_bounce_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("isadma_bounce_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 || (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0)
		return error;

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return error;
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return error;
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return 0;
}

/*
 * Like isadma_bounce_dmamap_load(), but for uios.
 */
int
isadma_bounce_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

	panic("isadma_bounce_dmamap_load_uio: not implemented");
}

/*
 * Like isadma_bounce_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
isadma_bounce_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("isadma_bounce_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
isadma_bounce_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		isadma_bounce_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}
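
/*
 * No data is copied at unload time; a caller that wants the result of
 * a bounced read must issue a BUS_DMASYNC_POSTREAD sync before
 * unloading, since unload resets the cookie's buffer-type state.
 */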

/*
 * Synchronize an ISA DMA map.
 */
void
isadma_bounce_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	void (*sync)(bus_dma_tag_t, bus_dmamap_t, bus_addr_t, bus_size_t, int);

	sync = _bus_dmamap_sync;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("isadma_bounce_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just drain the write buffer
	 * and flush cache.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0) {
		(*sync)(t, map, offset, len, ops);
		return;
	}

	/*
	 * XXX
	 * This should be needed in BUS_DMASYNC_POSTREAD case only,
	 * if _mips3_bus_dmamap_sync() used "Hit_Invalidate on POSTREAD",
	 * rather than "Hit_Write_Back_Invalidate on PREREAD".
	 */
	if (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTREAD))
		(*sync)(t, map, offset, len, ops);

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, char *) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("isadma_bounce_dmamap_sync");
	}

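	/*
	 * For PREWRITE, the cache sync must follow the copy into the
	 * bounce buffer above, so that the freshly written bounce
	 * pages are flushed to memory before the device reads them.
	 */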
	if (ops & BUS_DMASYNC_PREWRITE)
		(*sync)(t, map, offset, len, ops);
}

/*
 * Allocate memory safe for ISA DMA.
 */
int
isadma_bounce_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	paddr_t high;

	if (pmap_limits.avail_end > ISA_DMA_BOUNCE_THRESHOLD)
		high = ISA_DMA_BOUNCE_THRESHOLD - 1;
	else
		high = pmap_limits.avail_end - 1;

	return _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high);
}
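
/*
 * For example (assuming ISA_DMA_BOUNCE_THRESHOLD reflects the usual
 * 16M ISA limit): on a machine with 64M of RAM the allocation range
 * is clamped to [0, threshold - 1], while on a machine with 8M of RAM
 * all of physical memory is already DMA-safe and the end of RAM is
 * used as the upper bound instead.
 */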
623
624/**********************************************************************
625 * ISA DMA utility functions
626 **********************************************************************/
627
628static int
629isadma_bounce_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
630    bus_size_t size, int flags)
631{
632	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
633	int error = 0;
634
635	cookie->id_bouncebuflen = round_page(size);
636	error = isadma_bounce_dmamem_alloc(t, cookie->id_bouncebuflen,
637	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
638	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
639	if (error)
640		goto out;
641	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
642	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
643	    (void **)&cookie->id_bouncebuf, flags);
644
645 out:
646	if (error) {
647		_bus_dmamem_free(t, cookie->id_bouncesegs,
648		    cookie->id_nbouncesegs);
649		cookie->id_bouncebuflen = 0;
650		cookie->id_nbouncesegs = 0;
651	} else
652		cookie->id_flags |= ID_HAS_BOUNCE;
653
654	return error;
655}
656
657static void
658isadma_bounce_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
659{
660	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
661
662	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
663	    cookie->id_bouncebuflen);
664	_bus_dmamem_free(t, cookie->id_bouncesegs,
665	    cookie->id_nbouncesegs);
666	cookie->id_bouncebuflen = 0;
667	cookie->id_nbouncesegs = 0;
668	cookie->id_flags &= ~ID_HAS_BOUNCE;
669}
670