--- busdma_machdep-v4.c (289857)
+++ busdma_machdep-v4.c (289862)
/*-
 * Copyright (c) 2012 Ian Lepore
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

--- 16 unchanged lines hidden ---

 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 289857 2015-10-23 22:51:48Z ian $");
+__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 289862 2015-10-24 02:18:14Z ian $");

/*
 * ARM bus dma support routines.
 *
 * XXX Things to investigate / fix some day...
 * - What is the earliest that this API can be called?  Could there be any
 *   fallout from changing the SYSINIT() order from SI_SUB_VM to SI_SUB_KMEM?
 * - The manpage mentions the BUS_DMA_NOWAIT flag only in the context of the

--- 34 unchanged lines hidden ---

#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#define MAX_BPAGES 64
+#define MAX_DMA_SEGMENTS    4096
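(For scale: assuming the usual 32-bit ARM layout where bus_addr_t and bus_size_t are 4 bytes each, one bus_dma_segment is 8 bytes, so a tag that asks for the full 4096 segments gives each map a 4096 * 8 = 32 KiB segments array, plus one sync_list entry per segment. The cap presumably exists so the KASSERT in allocate_map() below can reject a nonsensical nsegments value before it is used to size a single large allocation.)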
#define BUS_DMA_COULD_BOUNCE    BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP  BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
    bus_dma_tag_t parent;
    bus_size_t alignment;

--- 14 unchanged lines hidden ---

    /*
     * DMA range for this tag.  If the page doesn't fall within
     * one of these ranges, an error is returned.  The caller
     * may then decide what to do with the transfer.  If the
     * range pointer is NULL, it is ignored.
     */
    struct arm32_dma_range *ranges;
    int _nranges;
-    /*
-     * Most tags need one or two segments, and can use the local tagsegs
-     * array.  For tags with a larger limit, we'll allocate a bigger array
-     * on first use.
-     */
-    bus_dma_segment_t *segments;
-    bus_dma_segment_t tagsegs[2];
};

struct bounce_page {
    vm_offset_t vaddr;      /* kva of bounce buffer */
    bus_addr_t busaddr;     /* Physical address */
    vm_offset_t datavaddr;  /* kva of client data */
    vm_page_t datapage;     /* physical page of client data */
    vm_offset_t dataoffs;   /* page offset of client data */

--- 41 unchanged lines hidden ---

    struct bp_list bpages;
    int pagesneeded;
    int pagesreserved;
    bus_dma_tag_t dmat;
    struct memdesc mem;
    bus_dmamap_callback_t *callback;
    void *callback_arg;
    int flags;
-#define DMAMAP_COHERENT         0x8
-#define DMAMAP_CACHE_ALIGNED    0x10
+#define DMAMAP_COHERENT         (1 << 0)
+#define DMAMAP_DMAMEM_ALLOC     (1 << 1)
+#define DMAMAP_MBUF             (1 << 2)
+#define DMAMAP_CACHE_ALIGNED    (1 << 3)
    STAILQ_ENTRY(bus_dmamap) links;
+   bus_dma_segment_t *segments;
    int sync_count;
-   struct sync_list *slist;
+   struct sync_list slist[];
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

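A note on the new slist[] declaration above: it is a C99 flexible array member. It must be the last member of the struct, it adds nothing to sizeof(), and the storage behind it comes from over-allocating when the structure is allocated. A minimal, self-contained sketch of the idiom (hypothetical names, userland C for brevity):

    #include <stdlib.h>

    struct flexdemo {
        int n;
        int data[];             /* flexible array member; must be last */
    };

    static struct flexdemo *
    flexdemo_alloc(int n)
    {
        /* sizeof(struct flexdemo) covers only 'n'; add room for data[]. */
        struct flexdemo *p = calloc(1,
            sizeof(struct flexdemo) + n * sizeof(int));

        if (p != NULL)
            p->n = n;
        return (p);             /* a single free(p) releases everything */
    }
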
-static struct mtx busdma_mtx;
-
-MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);
-
static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static void bus_dmamap_sync_sl(struct sync_list *sl, bus_dmasync_op_t op,
    int bufaligned);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t arm_root_dma_tag;

/*
 * ----------------------------------------------------------------------------
 * Begin block of code useful to transplant to other implementations.
 */

-static uma_zone_t dmamap_zone;    /* Cache of struct bus_dmamap items */
-
static busdma_bufalloc_t coherent_allocator;    /* Cache of coherent buffers */
static busdma_bufalloc_t standard_allocator;    /* Cache of standard buffers */

MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");

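MALLOC_DEFINE(9) registers a named malloc type so allocations can be attributed per subsystem (visible via vmstat -m). A minimal sketch of how a type like M_BUSDMA is then used with malloc(9)/free(9); the type name M_MYDRV and the helpers are hypothetical:

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/malloc.h>

    MALLOC_DEFINE(M_MYDRV, "mydrv", "my driver metadata");

    static void *
    mydrv_alloc(size_t size)
    {
        /* M_NOWAIT: may return NULL; M_ZERO: memory comes back zeroed. */
        return (malloc(size, M_MYDRV, M_NOWAIT | M_ZERO));
    }

    static void
    mydrv_free(void *p)
    {
        free(p, M_MYDRV);       /* must pass the same malloc type */
    }
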
-/*
- * This is the ctor function passed to uma_zcreate() for the pool of dma maps.
- * It'll need platform-specific changes if this code is copied.
- */
-static int
-dmamap_ctor(void *mem, int size, void *arg, int flags)
-{
-    bus_dmamap_t map;
-    bus_dma_tag_t dmat;
-
-    map = (bus_dmamap_t)mem;
-    dmat = (bus_dma_tag_t)arg;
-
-    dmat->map_count++;
-
-    map->dmat = dmat;
-    map->flags = 0;
-    STAILQ_INIT(&map->bpages);
-
-    return (0);
-}
-
-/*
- * This is the dtor function passed to uma_zcreate() for the pool of dma maps.
- * It may need platform-specific changes if this code is copied.
- */
static void
-dmamap_dtor(void *mem, int size, void *arg)
-{
-    bus_dmamap_t map;
-
-    map = (bus_dmamap_t)mem;
-
-    map->dmat->map_count--;
-}
-
-static void
busdma_init(void *dummy)
{

-    /* Create a cache of maps for bus_dmamap_create(). */
-    dmamap_zone = uma_zcreate("dma maps", sizeof(struct bus_dmamap),
-        dmamap_ctor, dmamap_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
-
    /* Create a cache of buffers in standard (cacheable) memory. */
    standard_allocator = busdma_bufalloc_create("buffer",
        arm_dcache_align,    /* minimum_alignment */
        NULL,                /* uma_alloc func */
        NULL,                /* uma_free func */
        0);                  /* uma_zcreate_flags */

--- 5 unchanged lines hidden ---

        arm_dcache_align,    /* minimum_alignment */
        busdma_bufalloc_alloc_uncacheable,
        busdma_bufalloc_free_uncacheable,
        0);                  /* uma_zcreate_flags */
}

/*
 * This init historically used SI_SUB_VM, but now the init code requires
- * malloc(9) using M_BUSDMA memory, which is set up later than SI_SUB_VM, by
- * SI_SUB_KMEM and SI_ORDER_THIRD, so we'll go right after that by using
- * SI_SUB_KMEM and SI_ORDER_FOURTH.
+ * malloc(9) using M_BUSDMA memory and the pcpu zones for counter(9), which get
+ * set up by SI_SUB_KMEM and SI_ORDER_LAST, so we'll go right after that by
+ * using SI_SUB_KMEM+1.
 */
-SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL);
+SYSINIT(busdma, SI_SUB_KMEM+1, SI_ORDER_FIRST, busdma_init, NULL);
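For context on the ordering fix: SYSINIT(9) runs registered hooks sorted first by subsystem and then by order within the subsystem, so SI_SUB_KMEM+1 with SI_ORDER_FIRST is guaranteed to run after every SI_ORDER_* entry of SI_SUB_KMEM itself. A minimal sketch of the mechanism (hypothetical function name):

    #include <sys/param.h>
    #include <sys/kernel.h>

    static void
    example_init(void *arg __unused)
    {
        /* Everything registered under SI_SUB_KMEM has completed by now,
         * so malloc(9) and the pcpu UMA zones behind counter(9) work. */
    }
    SYSINIT(example_init, SI_SUB_KMEM + 1, SI_ORDER_FIRST, example_init, NULL);
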

/*
 * End block of code useful to transplant to other implementations.
 * ----------------------------------------------------------------------------
 */

/*
 * Return true if a match is made.

--- 105 unchanged lines hidden ---

#else
    printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

/*
 * Allocate a device specific dma_tag.
 */
-#define SEG_NB 1024
-
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
    bus_dma_tag_t newtag;

--- 27 unchanged lines hidden ---

    newtag->_nranges = bus_dma_get_range_nb();
    if (lockfunc != NULL) {
        newtag->lockfunc = lockfunc;
        newtag->lockfuncarg = lockfuncarg;
    } else {
        newtag->lockfunc = dflt_lock;
        newtag->lockfuncarg = NULL;
    }
-    /*
-     * If all the segments we need fit into the local tagsegs array, set the
-     * pointer now.  Otherwise NULL the pointer and an array of segments
-     * will be allocated later, on first use.  We don't pre-allocate now
-     * because some tags exist just to pass constraints to children in the
-     * device hierarchy, and they tend to use BUS_SPACE_UNRESTRICTED and we
-     * sure don't want to try to allocate an array for that.
-     */
-    if (newtag->nsegments <= nitems(newtag->tagsegs))
-        newtag->segments = newtag->tagsegs;
-    else
-        newtag->segments = NULL;
-    /*
-     * Take into account any restrictions imposed by our parent tag
-     */
+
+    /* Take into account any restrictions imposed by our parent tag */
    if (parent != NULL) {
        newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
        newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
        if (newtag->boundary == 0)
            newtag->boundary = parent->boundary;
        else if (parent->boundary != 0)
            newtag->boundary = MIN(parent->boundary,
                newtag->boundary);

--- 64 unchanged lines hidden ---

        return (EBUSY);

    while (dmat != NULL) {
        bus_dma_tag_t parent;

        parent = dmat->parent;
        atomic_subtract_int(&dmat->ref_count, 1);
        if (dmat->ref_count == 0) {
-            if (dmat->segments != NULL &&
-                dmat->segments != dmat->tagsegs)
-                free(dmat->segments, M_BUSDMA);
            free(dmat, M_BUSDMA);
            /*
             * Last reference count, so
             * release our reference
             * count on our parent.
             */
            dmat = parent;
        } else
            dmat = NULL;
        }
    }
    CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

    return (0);
}

-#include <sys/kdb.h>
+static bus_dmamap_t
+allocate_map(bus_dma_tag_t dmat, int mflags)
+{
+    int mapsize, segsize;
+    bus_dmamap_t map;
+
+    /*
+     * Allocate the map.  The map structure ends with an embedded
+     * variable-sized array of sync_list structures.  Following that
+     * we allocate enough extra space to hold the array of bus_dma_segments.
+     */
+    KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS,
+        ("cannot allocate %u dma segments (max is %u)",
+        dmat->nsegments, MAX_DMA_SEGMENTS));
+    segsize = sizeof(struct bus_dma_segment) * dmat->nsegments;
+    mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments;
+    map = malloc(mapsize + segsize, M_BUSDMA, mflags | M_ZERO);
+    if (map == NULL) {
+        CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
+        return (NULL);
+    }
+    map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize);
+    return (map);
+}
+
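Only one flexible array member is allowed per struct and it must be last, so segments cannot be declared the way slist[] is; instead allocate_map() over-allocates further and points segments just past the sync_list entries. A standalone sketch of that layout with hypothetical stand-in types (the tail array gets only malloc()'s alignment, which suffices when both element types share the same natural alignment):

    #include <stdint.h>
    #include <stdlib.h>

    struct entry { uintptr_t va, pa, len; };    /* stand-in for sync_list */
    struct seg { uintptr_t addr, len; };        /* stand-in for bus_dma_segment */

    struct demo_map {
        struct seg *segments;   /* points into the tail of this allocation */
        int nsegs;
        struct entry slist[];   /* flexible array member */
    };

    static struct demo_map *
    demo_map_alloc(int nsegs)
    {
        size_t mapsize = sizeof(struct demo_map) + nsegs * sizeof(struct entry);
        size_t segsize = nsegs * sizeof(struct seg);
        struct demo_map *map = calloc(1, mapsize + segsize);

        if (map == NULL)
            return (NULL);
        map->nsegs = nsegs;
        map->segments = (struct seg *)((uintptr_t)map + mapsize);
        return (map);           /* one free(map) releases all three pieces */
    }
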
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
-    struct sync_list *slist;
    bus_dmamap_t map;
    int error = 0;

-    slist = malloc(sizeof(*slist) * dmat->nsegments, M_BUSDMA, M_NOWAIT);
-    if (slist == NULL)
-        return (ENOMEM);
-
-    map = uma_zalloc_arg(dmamap_zone, dmat, M_NOWAIT);
-    *mapp = map;
+    *mapp = map = allocate_map(dmat, M_NOWAIT);
    if (map == NULL) {
-        free(slist, M_BUSDMA);
+        CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
        return (ENOMEM);
    }

    /*
-     * If the tag's segments haven't been allocated yet we need to do it
-     * now, because we can't sleep for resources at map load time.
+     * Bouncing might be required if the driver asks for an exclusion
+     * region, a data alignment that is stricter than 1, or DMA that begins
+     * or ends with a partial cacheline.  Whether bouncing will actually
+     * happen can't be known until mapping time, but we need to pre-allocate
+     * resources now because we might not be allowed to at mapping time.
     */
-    if (dmat->segments == NULL) {
-        dmat->segments = malloc(dmat->nsegments *
-            sizeof(*dmat->segments), M_BUSDMA, M_NOWAIT);
-        if (dmat->segments == NULL) {
-            free(slist, M_BUSDMA);
-            uma_zfree(dmamap_zone, map);
-            *mapp = NULL;
-            return (ENOMEM);
-        }
-    }
-
-    /*
-     * Bouncing might be required if the driver asks for an active
-     * exclusion region, a data alignment that is stricter than 1, and/or
-     * an active address boundary.
-     */
    if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

        /* Must bounce */
        struct bounce_zone *bz;
        int maxpages;

        if (dmat->bounce_zone == NULL) {
            if ((error = alloc_bounce_zone(dmat)) != 0) {
-                free(slist, M_BUSDMA);
-                uma_zfree(dmamap_zone, map);
+                free(map, M_BUSDMA);
                *mapp = NULL;
                return (error);
            }
        }
        bz = dmat->bounce_zone;

        /* Initialize the new map */
        STAILQ_INIT(&((*mapp)->bpages));

--- 18 unchanged lines hidden ---

                dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
            } else {
                error = 0;
            }
        }
        bz->map_count++;
    }
    map->sync_count = 0;
-    map->slist = slist;
    CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
        __func__, dmat, dmat->flags, error);

    return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

    if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
        CTR3(KTR_BUSDMA, "%s: tag %p error %d",
            __func__, dmat, EBUSY);
        return (EBUSY);
    }
-    free(map->slist, M_BUSDMA);
-    uma_zfree(dmamap_zone, map);
    if (dmat->bounce_zone)
        dmat->bounce_zone->map_count--;
+    free(map, M_BUSDMA);
+    dmat->map_count--;
    CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
    return (0);
}
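Typical driver-side use of this create/destroy pair, as a hedged sketch (names, sizes, and the softc layout are hypothetical; error handling is abbreviated):

    #include <sys/param.h>
    #include <sys/bus.h>
    #include <machine/bus.h>

    struct mydev_softc {
        bus_dma_tag_t dma_tag;
        bus_dmamap_t dma_map;
    };

    static int
    mydev_dma_setup(device_t dev, struct mydev_softc *sc)
    {
        int error;

        /* One 64KB buffer, 4-byte aligned, anywhere in 32-bit space. */
        error = bus_dma_tag_create(bus_get_dma_tag(dev),
            4, 0,                       /* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            65536, 1, 65536,            /* maxsize, nsegments, maxsegsz */
            0, NULL, NULL,              /* flags, lockfunc, lockfuncarg */
            &sc->dma_tag);
        if (error != 0)
            return (error);
        error = bus_dmamap_create(sc->dma_tag, 0, &sc->dma_map);
        if (error != 0)
            bus_dma_tag_destroy(sc->dma_tag);
        return (error);
    }
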

/*
 * Allocate a piece of memory that can be efficiently mapped into bus device
 * space based on the constraints listed in the dma tag.  Returns a pointer to
 * the allocated memory, and a pointer to an associated bus_dmamap.
 */
int
-bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp, int flags,
+bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
-    struct sync_list *slist;
-    void * vaddr;
-    struct busdma_bufzone *bufzone;
-    busdma_bufalloc_t ba;
+    busdma_bufalloc_t ba;
+    struct busdma_bufzone *bufzone;
    bus_dmamap_t map;
-    int mflags;
    vm_memattr_t memattr;
+    int mflags;

    if (flags & BUS_DMA_NOWAIT)
        mflags = M_NOWAIT;
    else
        mflags = M_WAITOK;
-    /*
-     * If the tag's segments haven't been allocated yet we need to do it
-     * now, because we can't sleep for resources at map load time.
-     */
-    if (dmat->segments == NULL)
-        dmat->segments = malloc(dmat->nsegments *
-            sizeof(*dmat->segments), M_BUSDMA, mflags);
+    if (flags & BUS_DMA_ZERO)
+        mflags |= M_ZERO;

-    slist = malloc(sizeof(*slist) * dmat->nsegments, M_BUSDMA, M_NOWAIT);
-    if (slist == NULL)
-        return (ENOMEM);
-    map = uma_zalloc_arg(dmamap_zone, dmat, mflags);
+    *mapp = map = allocate_map(dmat, mflags);
    if (map == NULL) {
-        free(slist, M_BUSDMA);
+        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+            __func__, dmat, dmat->flags, ENOMEM);
        return (ENOMEM);
    }
+    map->flags = DMAMAP_DMAMEM_ALLOC;
+
+    /* Choose a busdma buffer allocator based on memory type flags. */
    if (flags & BUS_DMA_COHERENT) {
        memattr = VM_MEMATTR_UNCACHEABLE;
        ba = coherent_allocator;
        map->flags |= DMAMAP_COHERENT;
    } else {
        memattr = VM_MEMATTR_DEFAULT;
        ba = standard_allocator;
    }
-    /* All buffers we allocate are cache-aligned. */
-    map->flags |= DMAMAP_CACHE_ALIGNED;

-    if (flags & BUS_DMA_ZERO)
-        mflags |= M_ZERO;
-
    /*
     * Try to find a bufzone in the allocator that holds a cache of buffers
     * of the right size for this request. If the buffer is too big to be
     * held in the allocator cache, this returns NULL.
     */
    bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

    /*

--- 6 unchanged lines hidden ---

     * - The page count that could get allocated doesn't exceed nsegments.
     * - The alignment constraint isn't larger than a page boundary.
     * - There are no boundary-crossing constraints.
     * else allocate a block of contiguous pages because one or more of the
     * constraints is something that only the contig allocator can fulfill.
     */
    if (bufzone != NULL && dmat->alignment <= bufzone->size &&
        !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
-        vaddr = uma_zalloc(bufzone->umazone, mflags);
+        *vaddr = uma_zalloc(bufzone->umazone, mflags);
    } else if (dmat->nsegments >= btoc(dmat->maxsize) &&
        dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
-        vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
+        *vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
            mflags, 0, dmat->lowaddr, memattr);
    } else {
-        vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
+        *vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
            mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
            memattr);
    }
-    if (vaddr == NULL) {
-        free(slist, M_BUSDMA);
-        uma_zfree(dmamap_zone, map);
-        map = NULL;
-    } else {
-        map->slist = slist;
-        map->sync_count = 0;
+    if (*vaddr == NULL) {
+        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+            __func__, dmat, dmat->flags, ENOMEM);
+        free(map, M_BUSDMA);
+        *mapp = NULL;
+        return (ENOMEM);
    }
-    *vaddrp = vaddr;
-    *mapp = map;
+    dmat->map_count++;

-    return (vaddr == NULL ? ENOMEM : 0);
+    CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+        __func__, dmat, dmat->flags, 0);
+    return (0);
}
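A common use of bus_dmamem_alloc() is a coherent, zeroed descriptor ring that is then loaded to learn its bus address. A hedged sketch (softc fields, RING_BYTES, and names are hypothetical; the callback captures the single segment's address):

    #define RING_BYTES 4096     /* hypothetical ring size == tag maxsize */

    struct mydev_softc {
        bus_dma_tag_t ring_tag;
        bus_dmamap_t ring_map;
        void *ring_va;
        bus_addr_t ring_pa;
    };

    static void
    ring_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
    {
        if (error == 0)
            *(bus_addr_t *)arg = segs[0].ds_addr;
    }

    static int
    ring_alloc(struct mydev_softc *sc)
    {
        int error;

        error = bus_dmamem_alloc(sc->ring_tag, &sc->ring_va,
            BUS_DMA_COHERENT | BUS_DMA_ZERO | BUS_DMA_NOWAIT, &sc->ring_map);
        if (error != 0)
            return (error);
        error = bus_dmamap_load(sc->ring_tag, sc->ring_map, sc->ring_va,
            RING_BYTES, ring_map_cb, &sc->ring_pa, BUS_DMA_NOWAIT);
        if (error != 0)
            bus_dmamem_free(sc->ring_tag, sc->ring_va, sc->ring_map);
        return (error);
    }
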

/*
 * Free a piece of memory that was allocated via bus_dmamem_alloc, along with
 * its associated map.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
    struct busdma_bufzone *bufzone;
    busdma_bufalloc_t ba;

    if (map->flags & DMAMAP_COHERENT)
        ba = coherent_allocator;
    else
        ba = standard_allocator;

-    free(map->slist, M_BUSDMA);
-    uma_zfree(dmamap_zone, map);

    bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

    if (bufzone != NULL && dmat->alignment <= bufzone->size &&
        !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
        uma_zfree(bufzone->umazone, vaddr);
    else
        kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
+
+    dmat->map_count--;
+    free(map, M_BUSDMA);
+    CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
    bus_addr_t curaddr;
    bus_size_t sgsize;

--- 144 unchanged lines hidden ---

{
    struct sync_list *sl;
    bus_size_t sgsize;
    bus_addr_t curaddr;
    bus_addr_t sl_end = 0;
    int error;

    if (segs == NULL)
-        segs = dmat->segments;
+        segs = map->segments;

    if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
        _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
        if (map->pagesneeded != 0) {
            error = _bus_dmamap_reserve_pages(dmat, map, flags);
            if (error)
                return (error);
        }

--- 67 unchanged lines hidden ---

    bus_addr_t sl_pend = 0;
    struct sync_list *sl;
    vm_offset_t kvaddr;
    vm_offset_t vaddr = (vm_offset_t)buf;
    vm_offset_t sl_vend = 0;
    int error = 0;

    if (segs == NULL)
-        segs = dmat->segments;
+        segs = map->segments;
    if ((flags & BUS_DMA_LOAD_MBUF) != 0)
        map->flags |= DMAMAP_CACHE_ALIGNED;

    if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
        _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
        if (map->pagesneeded != 0) {
            error = _bus_dmamap_reserve_pages(dmat, map, flags);
            if (error)

--- 84 unchanged lines hidden ---

}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

    if (segs == NULL)
-        segs = dmat->segments;
+        segs = map->segments;
    return (segs);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
    struct bounce_page *bpage;
+    struct bounce_zone *bz;

-    while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
-        STAILQ_REMOVE_HEAD(&map->bpages, links);
-        free_bounce_page(dmat, bpage);
+    if ((bz = dmat->bounce_zone) != NULL) {
+        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+            STAILQ_REMOVE_HEAD(&map->bpages, links);
+            free_bounce_page(dmat, bpage);
+        }
+
+        bz = dmat->bounce_zone;
+        bz->free_bpages += map->pagesreserved;
+        bz->reserved_bpages -= map->pagesreserved;
+        map->pagesreserved = 0;
+        map->pagesneeded = 0;
    }
    map->sync_count = 0;
+    map->flags &= ~DMAMAP_MBUF;
}

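The new accounting in _bus_dmamap_unload() keeps the bounce zone's counters balanced: pages a map reserved but never used go back from reserved_bpages to free_bpages. Assuming the zone also tracks total_bpages and active_bpages, as the bookkeeping elsewhere in this file suggests, the intended invariant can be sketched as:

    /* Hypothetical sanity check of the bounce-zone page accounting. */
    static void
    bounce_zone_sanity(struct bounce_zone *bz)
    {
        KASSERT(bz->free_bpages + bz->reserved_bpages + bz->active_bpages ==
            bz->total_bpages, ("bounce zone accounting out of sync"));
    }
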
static void
bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op,
    int bufaligned)
{
    char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];
    register_t s;

--- 421 unchanged lines hidden ---