Diff of sys/arm/arm/busdma_machdep-v6.c between r269215 and r269216 (ian, 2014-07-29):

--- sys/arm/arm/busdma_machdep-v6.c	(r269215)
+++ sys/arm/arm/busdma_machdep-v6.c	(r269216)
 /*-
  * Copyright (c) 2012 Ian Lepore
  * Copyright (c) 2010 Mark Tinguely
  * Copyright (c) 2004 Olivier Houchard
  * Copyright (c) 2002 Peter Grehan
  * Copyright (c) 1997, 1998 Justin T. Gibbs.
  * All rights reserved.
  *
[... 17 unchanged lines hidden ...]
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb
  */

 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 269215 2014-07-29 02:37:31Z ian $");
+__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 269216 2014-07-29 02:37:48Z ian $");

 #define _ARM32_BUS_DMA_PRIVATE
 #include <sys/param.h>
 #include <sys/kdb.h>
 #include <ddb/ddb.h>
 #include <ddb/db_output.h>
 #include <sys/systm.h>
 #include <sys/malloc.h>
[... 16 unchanged lines hidden ...]
 #include <vm/vm_kern.h>

 #include <machine/atomic.h>
 #include <machine/bus.h>
 #include <machine/cpufunc.h>
 #include <machine/md_var.h>

 #define MAX_BPAGES 64
+#define MAX_DMA_SEGMENTS 4096
 #define BUS_DMA_EXCL_BOUNCE BUS_DMA_BUS2
 #define BUS_DMA_ALIGN_BOUNCE BUS_DMA_BUS3
 #define BUS_DMA_COULD_BOUNCE (BUS_DMA_EXCL_BOUNCE | BUS_DMA_ALIGN_BOUNCE)
 #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4

 struct bounce_zone;

 struct bus_dma_tag {
[... 16 unchanged lines hidden ...]
 	/*
 	 * DMA range for this tag.  If the page doesn't fall within
 	 * one of these ranges, an error is returned.  The caller
 	 * may then decide what to do with the transfer.  If the
 	 * range pointer is NULL, it is ignored.
 	 */
 	struct arm32_dma_range *ranges;
 	int _nranges;
-	/*
-	 * Most tags need one or two segments, and can use the local tagsegs
-	 * array.  For tags with a larger limit, we'll allocate a bigger array
-	 * on first use.
-	 */
-	bus_dma_segment_t *segments;
-	bus_dma_segment_t tagsegs[2];
-
-
 };

 struct bounce_page {
 	vm_offset_t	vaddr;		/* kva of bounce buffer */
 	bus_addr_t	busaddr;	/* Physical address */
 	vm_offset_t	datavaddr;	/* kva of client data */
 	bus_addr_t	dataaddr;	/* client physical address */
 	bus_size_t	datacount;	/* client data count */
[... 44 unchanged lines hidden ...]
 	pmap_t		pmap;
 	bus_dmamap_callback_t *callback;
 	void		*callback_arg;
 	int		flags;
 #define DMAMAP_COHERENT		(1 << 0)
 #define DMAMAP_DMAMEM_ALLOC	(1 << 1)
 #define DMAMAP_MBUF		(1 << 2)
 	STAILQ_ENTRY(bus_dmamap) links;
+	bus_dma_segment_t *segments;
 	int		sync_count;
 	struct sync_list slist[];
 };
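Note on the two hunks above: segment-array bookkeeping moves from the tag to the map. struct bus_dma_tag loses its lazily allocated segments pointer and the two-entry inline tagsegs optimization, and struct bus_dmamap gains a segments pointer instead, next to the sync_count/slist[] pair it is allocated with. A minimal standalone sketch of the resulting single-allocation layout, using hypothetical stand-in types rather than the real busdma definitions (the kernel version is the allocate_map() function added further down):

/*
 * Hedged sketch, not kernel code: one allocation carries the map header,
 * its flexible sync_list array, and the trailing segment array, mirroring
 * the layout allocate_map() sets up below.  All types are stand-ins.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct seg  { uintptr_t ds_addr; size_t ds_len; };  /* stand-in for bus_dma_segment_t */
struct sync { uintptr_t vaddr; size_t datacount; }; /* stand-in for struct sync_list */

struct map {
	struct seg  *segments;	/* points into the same allocation */
	int	     sync_count;
	struct sync  slist[];	/* flexible array member, as in the new struct */
};

static struct map *
map_alloc(int nsegments)
{
	size_t mapsize = sizeof(struct map) + sizeof(struct sync) * nsegments;
	size_t segsize = sizeof(struct seg) * nsegments;
	struct map *m = calloc(1, mapsize + segsize);	/* calloc ~ malloc + M_ZERO */

	if (m == NULL)
		return (NULL);
	m->segments = (struct seg *)((uintptr_t)m + mapsize);
	return (m);
}

int
main(void)
{
	struct map *m = map_alloc(8);

	if (m != NULL) {
		printf("map %p, slist %p, segments %p\n",
		    (void *)m, (void *)m->slist, (void *)m->segments);
		free(m);	/* one free() releases all three regions */
	}
	return (0);
}

The design point is that one malloc/free pair now covers the map header, the sync list, and the segment array, and each map carries its own scratch segments rather than sharing one array hung off the tag.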

 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

 static void init_bounce_pages(void *dummy);
[... 295 unchanged lines hidden ...]
 	newtag->_nranges = bus_dma_get_range_nb();
 	if (lockfunc != NULL) {
 		newtag->lockfunc = lockfunc;
 		newtag->lockfuncarg = lockfuncarg;
 	} else {
 		newtag->lockfunc = dflt_lock;
 		newtag->lockfuncarg = NULL;
 	}
-	/*
-	 * If all the segments we need fit into the local tagsegs array, set the
-	 * pointer now.  Otherwise NULL the pointer and an array of segments
-	 * will be allocated later, on first use.  We don't pre-allocate now
-	 * because some tags exist just to pass contraints to children in the
-	 * device hierarchy, and they tend to use BUS_SPACE_UNRESTRICTED and we
-	 * sure don't want to try to allocate an array for that.
-	 */
-	if (newtag->nsegments <= nitems(newtag->tagsegs))
-		newtag->segments = newtag->tagsegs;
-	else
-		newtag->segments = NULL;

 	/* Take into account any restrictions imposed by our parent tag */
 	if (parent != NULL) {
 		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
 		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
 		newtag->alignment = MAX(parent->alignment, newtag->alignment);
 		newtag->flags |= parent->flags & BUS_DMA_COULD_BOUNCE;
 		if (newtag->boundary == 0)
[... 80 unchanged lines hidden ...]
 	}

 	while (dmat != NULL) {
 		bus_dma_tag_t parent;

 		parent = dmat->parent;
 		atomic_subtract_int(&dmat->ref_count, 1);
 		if (dmat->ref_count == 0) {
-			if (dmat->segments != NULL &&
-			    dmat->segments != dmat->tagsegs)
-				free(dmat->segments, M_DEVBUF);
 			free(dmat, M_DEVBUF);
 			/*
 			 * Last reference count, so
 			 * release our reference
 			 * count on our parent.
 			 */
 			dmat = parent;
 		} else
[... 40 unchanged lines hidden ...]

 	if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0)
 		dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
 	}
 	bz->map_count++;
 	return (0);
 }

+static bus_dmamap_t
+allocate_map(bus_dma_tag_t dmat, int mflags)
+{
+	int mapsize, segsize;
+	bus_dmamap_t map;
+
+	/*
+	 * Allocate the map.  The map structure ends with an embedded
+	 * variable-sized array of sync_list structures.  Following that
+	 * we allocate enough extra space to hold the array of bus_dma_segments.
+	 */
+	KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS,
+	    ("cannot allocate %u dma segments (max is %u)",
+	    dmat->nsegments, MAX_DMA_SEGMENTS));
+	segsize = sizeof(struct bus_dma_segment) * dmat->nsegments;
+	mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments;
+	map = malloc(mapsize + segsize, M_DEVBUF, mflags | M_ZERO);
+	if (map == NULL) {
+		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
+		return (NULL);
+	}
+	map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize);
+	return (map);
+}
+
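Note: allocate_map() is now the single allocation path used by both bus_dmamap_create() and bus_dmamem_alloc() below, and the new MAX_DMA_SEGMENTS cap of 4096 (enforced by the KASSERT) bounds the size of the combined allocation. Rough worst-case arithmetic, assuming purely for illustration a 12-byte struct sync_list and an 8-byte bus_dma_segment_t (neither size is stated in this diff):

	mapsize + segsize ~= sizeof(*map) + 4096 * 12 + 4096 * 8 ~= 80 KiB

per map at the cap. Typical tags request only a handful of segments, so the usual cost is a few hundred bytes per map.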
 /*
  * Allocate a handle for mapping from kva/uva/physical
  * address space into bus device space.
  */
 int
 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
 {
 	bus_dmamap_t map;
-	int mapsize;
 	int error = 0;

-	mapsize = sizeof(*map) + (sizeof(struct sync_list) * dmat->nsegments);
-	*mapp = map = malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
+	*mapp = map = allocate_map(dmat, M_NOWAIT);
 	if (map == NULL) {
 		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
 		return (ENOMEM);
 	}
-	map->sync_count = 0;

-	if (dmat->segments == NULL) {
-		dmat->segments = (bus_dma_segment_t *)malloc(
-		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
-		    M_NOWAIT);
-		if (dmat->segments == NULL) {
-			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
-			    __func__, dmat, ENOMEM);
-			free(map, M_DEVBUF);
-			*mapp = NULL;
-			return (ENOMEM);
-		}
-	}
 	/*
-	 * Bouncing might be required if the driver asks for an active
-	 * exclusion region, a data alignment that is stricter than 1, and/or
-	 * an active address boundary.
+	 * Bouncing might be required if the driver asks for an exclusion
+	 * region, a data alignment that is stricter than 1, or DMA that begins
+	 * or ends with a partial cacheline.  Whether bouncing will actually
+	 * happen can't be known until mapping time, but we need to pre-allocate
+	 * resources now because we might not be allowed to at mapping time.
 	 */
 	error = allocate_bz_and_pages(dmat, map);
 	if (error != 0) {
 		free(map, M_DEVBUF);
 		*mapp = NULL;
 		return (error);
 	}
 	return (error);

[... 29 unchanged lines hidden ...]
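Note on the bus_dmamap_create() hunk above: the separate, lazily created dmat->segments allocation and its matching error-path cleanup disappear, since allocate_map() hands back a fully wired map in one step. The path still uses M_NOWAIT, matching the flags the old malloc() call passed, so caller-visible behavior is unchanged. A hedged caller's-eye sketch (sc and sc->dma_tag are hypothetical; only the bus_dmamap_create() call itself appears in this file):

	/* Hypothetical driver fragment. */
	bus_dmamap_t map;
	int error;

	error = bus_dmamap_create(sc->dma_tag, 0, &map);
	if (error != 0)
		return (error);	/* ENOMEM when allocate_map() returns NULL */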
 bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
 		 bus_dmamap_t *mapp)
 {
 	busdma_bufalloc_t ba;
 	struct busdma_bufzone *bufzone;
 	bus_dmamap_t map;
 	vm_memattr_t memattr;
 	int mflags;
-	int mapsize;
-	int error;

 	if (flags & BUS_DMA_NOWAIT)
 		mflags = M_NOWAIT;
 	else
 		mflags = M_WAITOK;
+	if (flags & BUS_DMA_ZERO)
+		mflags |= M_ZERO;

-	/* ARM non-snooping caches need a map for the VA cache sync structure */
-
-	mapsize = sizeof(*map) + (sizeof(struct sync_list) * dmat->nsegments);
-	*mapp = map = malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
+	*mapp = map = allocate_map(dmat, mflags);
 	if (map == NULL) {
 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
 		    __func__, dmat, dmat->flags, ENOMEM);
 		return (ENOMEM);
 	}
 	map->flags = DMAMAP_DMAMEM_ALLOC;
-	map->sync_count = 0;

-	if (dmat->segments == NULL) {
-		dmat->segments = (bus_dma_segment_t *)malloc(
-		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
-		    mflags);
-		if (dmat->segments == NULL) {
-			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
-			    __func__, dmat, dmat->flags, ENOMEM);
-			free(map, M_DEVBUF);
-			*mapp = NULL;
-			return (ENOMEM);
-		}
-	}
-
-	if (flags & BUS_DMA_ZERO)
-		mflags |= M_ZERO;
+	/* Choose a busdma buffer allocator based on memory type flags. */
 	if (flags & BUS_DMA_COHERENT) {
 		memattr = VM_MEMATTR_UNCACHEABLE;
 		ba = coherent_allocator;
 		map->flags |= DMAMAP_COHERENT;
 	} else {
 		memattr = VM_MEMATTR_DEFAULT;
 		ba = standard_allocator;
 	}

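Note on the bus_dmamem_alloc() hunk above: the old code always allocated the map with M_NOWAIT | M_ZERO and applied the caller's wait flag only to later allocations; the new code computes mflags up front, including the BUS_DMA_ZERO translation that used to happen further down, and hands it to allocate_map(), so a BUS_DMA_WAITOK caller now gets a sleeping map allocation as well. The flag translation, restated as a hypothetical helper for clarity:

	/* Hypothetical helper restating the translation visible above. */
	static inline int
	dma_alloc_mflags(int flags)
	{
		int mflags;

		mflags = (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK;
		if (flags & BUS_DMA_ZERO)
			mflags |= M_ZERO;
		return (mflags);
	}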
[... 242 unchanged lines hidden ...]
 		    bus_dma_segment_t *segs,
 		    int *segp)
 {
 	bus_addr_t curaddr;
 	bus_size_t sgsize;
 	int error;

 	if (segs == NULL)
-		segs = dmat->segments;
+		segs = map->segments;

 	if (might_bounce(dmat, map, buflen, buflen)) {
 		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
 		if (map->pagesneeded != 0) {
 			error = _bus_dmamap_reserve_pages(dmat, map, flags);
 			if (error)
 				return (error);
 		}
[... 51 unchanged lines hidden ...]
 {
 	bus_size_t sgsize;
 	bus_addr_t curaddr;
 	vm_offset_t vaddr;
 	struct sync_list *sl;
 	int error;

 	if (segs == NULL)
-		segs = dmat->segments;
+		segs = map->segments;

 	if (flags & BUS_DMA_LOAD_MBUF)
 		map->flags |= DMAMAP_MBUF;

 	map->pmap = pmap;

 	if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
 		_bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
[... 78 unchanged lines hidden ...]
 }

 bus_dma_segment_t *
 _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
 		     bus_dma_segment_t *segs, int nsegs, int error)
 {

 	if (segs == NULL)
-		segs = dmat->segments;
+		segs = map->segments;
 	return (segs);
 }

 /*
  * Release the mapping held by map.
  */
 void
 _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
[... 455 unchanged lines hidden ...]
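Note on the last three hunks: they are mechanical. _bus_dmamap_load_phys(), _bus_dmamap_load_buffer(), and _bus_dmamap_complete() all use the same fallback when the caller passes no segment array:

	if (segs == NULL)
		segs = map->segments;	/* was dmat->segments before r269216 */

With that, nothing outside the map itself reaches for dmat->segments any more, which is what allows the tag-side pointer and its tagsegs[2] fast path to be deleted in the earlier hunks.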