--- busdma_machdep-v6.c (243909)
+++ busdma_machdep-v6.c (244469)
 /*-
+ * Copyright (c) 2012 Ian Lepore
  * Copyright (c) 2010 Mark Tinguely
  * Copyright (c) 2004 Olivier Houchard
  * Copyright (c) 2002 Peter Grehan
  * Copyright (c) 1997, 1998 Justin T. Gibbs.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions

--- 15 unchanged lines hidden ---

  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb
  */

 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 243909 2012-12-05 21:07:27Z cognet $");
+__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 244469 2012-12-20 00:35:26Z cognet $");

 #define _ARM32_BUS_DMA_PRIVATE
 #include <sys/param.h>
 #include <sys/kdb.h>
 #include <ddb/ddb.h>
 #include <ddb/db_output.h>
 #include <sys/systm.h>
 #include <sys/malloc.h>
 #include <sys/bus.h>
+#include <sys/busdma_bufalloc.h>
 #include <sys/interrupt.h>
 #include <sys/kernel.h>
 #include <sys/ktr.h>
 #include <sys/lock.h>
 #include <sys/proc.h>
 #include <sys/mutex.h>
 #include <sys/mbuf.h>
 #include <sys/uio.h>
 #include <sys/sysctl.h>

 #include <vm/vm.h>
 #include <vm/vm_page.h>
 #include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>

 #include <machine/atomic.h>
 #include <machine/bus.h>
 #include <machine/cpufunc.h>
 #include <machine/md_var.h>

 #define MAX_BPAGES 64
 #define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3

--- 14 unchanged lines hidden ---

        bus_size_t maxsize;
        u_int nsegments;
        bus_size_t maxsegsz;
        int flags;
        int ref_count;
        int map_count;
        bus_dma_lock_t *lockfunc;
        void *lockfuncarg;
-       bus_dma_segment_t *segments;
        struct bounce_zone *bounce_zone;
        /*
         * DMA range for this tag.  If the page doesn't fall within
         * one of these ranges, an error is returned.  The caller
         * may then decide what to do with the transfer.  If the
         * range pointer is NULL, it is ignored.
         */
        struct arm32_dma_range *ranges;
        int _nranges;
+       /*
+        * Most tags need one or two segments, and can use the local tagsegs
+        * array.  For tags with a larger limit, we'll allocate a bigger array
+        * on first use.
+        */
+       bus_dma_segment_t *segments;
+       bus_dma_segment_t tagsegs[2];

 };
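
The window test described in the ranges comment above runs at map-load time, in
code hidden from this view. A hedged sketch of the idea, using the
dr_sysbase/dr_len fields of struct arm32_dma_range; the helper name is invented
for illustration and is not code from this file:

    static int
    dma_range_contains(struct arm32_dma_range *ranges, int nranges,
        bus_addr_t paddr)
    {
            int i;

            /* A NULL range pointer means "no restriction"; it is ignored. */
            if (ranges == NULL)
                    return (1);
            for (i = 0; i < nranges; i++)
                    if (paddr >= ranges[i].dr_sysbase &&
                        paddr < ranges[i].dr_sysbase + ranges[i].dr_len)
                            return (1);
            /* Not inside any window; the caller gets an error and decides. */
            return (0);
    }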

 struct bounce_page {
        vm_offset_t vaddr;              /* kva of bounce buffer */
        bus_addr_t busaddr;             /* Physical address */
        vm_offset_t datavaddr;          /* kva of client data */
        bus_size_t datacount;           /* client data count */
        STAILQ_ENTRY(bounce_page) links;

--- 40 unchanged lines hidden ---

        int pagesneeded;
        int pagesreserved;
        bus_dma_tag_t dmat;
        void *buf;                      /* unmapped buffer pointer */
        bus_size_t buflen;              /* unmapped buffer length */
        pmap_t pmap;
        bus_dmamap_callback_t *callback;
        void *callback_arg;
+       int flags;
+#define DMAMAP_COHERENT         (1 << 0)
        STAILQ_ENTRY(bus_dmamap) links;
        STAILQ_HEAD(,sync_list) slist;
 };

 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

 static void init_bounce_pages(void *dummy);
 static int alloc_bounce_zone(bus_dma_tag_t dmat);
 static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
     int commit);
 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
     vm_offset_t vaddr, bus_size_t size);
 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
 int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
 static int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
     void *buf, bus_size_t buflen, int flags);

+static busdma_bufalloc_t coherent_allocator;   /* Cache of coherent buffers */
+static busdma_bufalloc_t standard_allocator;   /* Cache of standard buffers */
+
+static void
+busdma_init(void *dummy)
+{
+
+       /* Create a cache of buffers in standard (cacheable) memory. */
+       standard_allocator = busdma_bufalloc_create("buffer",
+           arm_dcache_align,   /* minimum_alignment */
+           NULL,               /* uma_alloc func */
+           NULL,               /* uma_free func */
+           0);                 /* uma_zcreate_flags */
+
+       /*
+        * Create a cache of buffers in uncacheable memory, to implement the
+        * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag.
+        */
+       coherent_allocator = busdma_bufalloc_create("coherent",
+           arm_dcache_align,   /* minimum_alignment */
+           busdma_bufalloc_alloc_uncacheable,
+           busdma_bufalloc_free_uncacheable,
+           0);                 /* uma_zcreate_flags */
+}
+
+/*
+ * This init historically used SI_SUB_VM, but now the init code requires
+ * malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by
+ * SI_SUB_KMEM and SI_ORDER_SECOND, so we'll go right after that by using
+ * SI_SUB_KMEM and SI_ORDER_THIRD.
+ */
+SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_THIRD, busdma_init, NULL);
+
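
For context, BUS_DMA_COHERENT is the flag that routes an allocation to the new
coherent_allocator. A minimal, hedged usage sketch from the driver side; the
device handle and every tag parameter here are invented for illustration:

    bus_dma_tag_t tag;
    bus_dmamap_t map;
    void *ring;
    int error;

    /* Sketch: allocate a coherent, zeroed descriptor ring. */
    error = bus_dma_tag_create(bus_get_dma_tag(dev),    /* parent */
        256, 0,                         /* alignment, boundary */
        BUS_SPACE_MAXADDR_32BIT,        /* lowaddr */
        BUS_SPACE_MAXADDR,              /* highaddr */
        NULL, NULL,                     /* filter, filterarg */
        4096, 1, 4096,                  /* maxsize, nsegments, maxsegsz */
        0, NULL, NULL, &tag);           /* flags, lockfunc, lockarg, result */
    if (error == 0)
        error = bus_dmamem_alloc(tag, &ring,
            BUS_DMA_COHERENT | BUS_DMA_ZERO, &map);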
 static __inline int
 _bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
 {
        int i;
        for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
                if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
                    || (lowaddr < phys_avail[i] &&
                    highaddr > phys_avail[i]))

--- 137 unchanged lines hidden ---

        newtag->_nranges = bus_dma_get_range_nb();
        if (lockfunc != NULL) {
                newtag->lockfunc = lockfunc;
                newtag->lockfuncarg = lockfuncarg;
        } else {
                newtag->lockfunc = dflt_lock;
                newtag->lockfuncarg = NULL;
        }
-       newtag->segments = NULL;
+       /*
+        * If all the segments we need fit into the local tagsegs array, set
+        * the pointer now.  Otherwise NULL the pointer and an array of
+        * segments will be allocated later, on first use.  We don't
+        * pre-allocate now because some tags exist just to pass constraints
+        * to children in the device hierarchy, and they tend to use
+        * BUS_SPACE_UNRESTRICTED and we sure don't want to try to allocate
+        * an array for that.
+        */
+       if (newtag->nsegments <= nitems(newtag->tagsegs))
+               newtag->segments = newtag->tagsegs;
+       else
+               newtag->segments = NULL;

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
                if (newtag->boundary == 0)
                        newtag->boundary = parent->boundary;
                else if (parent->boundary != 0)

--- 72 unchanged lines hidden ---
406 }
407
408 while (dmat != NULL) {
409 bus_dma_tag_t parent;
410
411 parent = dmat->parent;
412 atomic_subtract_int(&dmat->ref_count, 1);
413 if (dmat->ref_count == 0) {
382
383 /* Take into account any restrictions imposed by our parent tag */
384 if (parent != NULL) {
385 newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
386 newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
387 if (newtag->boundary == 0)
388 newtag->boundary = parent->boundary;
389 else if (parent->boundary != 0)

--- 72 unchanged lines hidden (view full) ---

462 }
463
464 while (dmat != NULL) {
465 bus_dma_tag_t parent;
466
467 parent = dmat->parent;
468 atomic_subtract_int(&dmat->ref_count, 1);
469 if (dmat->ref_count == 0) {
414 if (dmat->segments != NULL)
470 if (dmat->segments != NULL &&
471 dmat->segments != dmat->tagsegs)
415 free(dmat->segments, M_DEVBUF);
416 free(dmat, M_DEVBUF);
417 /*
418 * Last reference count, so
419 * release our reference
420 * count on our parent.
421 */
422 dmat = parent;

--- 117 unchanged lines hidden (view full) ---

540 * Allocate a piece of memory that can be efficiently mapped into
541 * bus device space based on the constraints lited in the dma tag.
542 * A dmamap to for use with dmamap_load is also allocated.
543 */
544int
545bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
546 bus_dmamap_t *mapp)
547{
472 free(dmat->segments, M_DEVBUF);
473 free(dmat, M_DEVBUF);
474 /*
475 * Last reference count, so
476 * release our reference
477 * count on our parent.
478 */
479 dmat = parent;

--- 117 unchanged lines hidden (view full) ---

597 * Allocate a piece of memory that can be efficiently mapped into
598 * bus device space based on the constraints lited in the dma tag.
599 * A dmamap to for use with dmamap_load is also allocated.
600 */
601int
602bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
603 bus_dmamap_t *mapp)
604{
-       int mflags, len;
+       busdma_bufalloc_t ba;
+       struct busdma_bufzone *bufzone;
+       vm_memattr_t memattr;
+       int mflags;

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;

        /* ARM non-snooping caches need a map for the VA cache sync structure */

--- 17 unchanged lines hidden ---

                        free(*mapp, M_DEVBUF);
                        *mapp = NULL;
                        return (ENOMEM);
                }
        }

        if (flags & BUS_DMA_ZERO)
                mflags |= M_ZERO;
+       if (flags & BUS_DMA_COHERENT) {
+               memattr = VM_MEMATTR_UNCACHEABLE;
+               ba = coherent_allocator;
+               (*mapp)->flags |= DMAMAP_COHERENT;
+       } else {
+               memattr = VM_MEMATTR_DEFAULT;
+               ba = standard_allocator;
+               (*mapp)->flags = 0;
+       }
+#ifdef notyet
+       /* All buffers we allocate are cache-aligned. */
+       map->flags |= DMAMAP_CACHE_ALIGNED;
+#endif

-       /*
-        * XXX:
-        * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
-        * alignment guarantees of malloc need to be nailed down, and the
-        * code below should be rewritten to take that into account.
-        *
-        * In the meantime, we'll warn the user if malloc gets it wrong.
-        *
-        * allocate at least a cache line. This should help avoid cache
-        * corruption.
-        */
-       len = max(dmat->maxsize, arm_dcache_align);
-       if (len <= PAGE_SIZE &&
-           (dmat->alignment < len) &&
-           !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
-               *vaddr = malloc(len, M_DEVBUF, mflags);
+       /*
+        * Try to find a bufzone in the allocator that holds a cache of buffers
+        * of the right size for this request.  If the buffer is too big to be
+        * held in the allocator cache, this returns NULL.
+        */
+       bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
+
+       /*
+        * Allocate the buffer from the uma(9) allocator if...
+        *  - It's small enough to be in the allocator (bufzone not NULL).
+        *  - The alignment constraint isn't larger than the allocation size
+        *    (the allocator aligns buffers to their size boundaries).
+        *  - There's no need to handle lowaddr/highaddr exclusion zones.
+        * else allocate non-contiguous pages if...
+        *  - The page count that could get allocated doesn't exceed nsegments.
+        *  - The alignment constraint isn't larger than a page boundary.
+        *  - There are no boundary-crossing constraints.
+        * else allocate a block of contiguous pages because one or more of the
+        * constraints is something that only the contig allocator can fulfill.
+        */
+       if (bufzone != NULL && dmat->alignment <= bufzone->size &&
+           !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
+               *vaddr = uma_zalloc(bufzone->umazone, mflags);
+       } else if (dmat->nsegments >= btoc(dmat->maxsize) &&
+           dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
+               *vaddr = (void *)kmem_alloc_attr(kernel_map, dmat->maxsize,
+                   mflags, 0, dmat->lowaddr, memattr);
        } else {
-               /*
-                * XXX Use Contigmalloc until it is merged into this facility
-                *     and handles multi-seg allocations.  Nobody is doing
-                *     multi-seg allocations yet though.
-                * XXX Certain AGP hardware does.
-                */
-               *vaddr = contigmalloc(len, M_DEVBUF, mflags,
-                   0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
-                   dmat->boundary);
+               *vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize,
+                   mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
+                   memattr);
        }

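
Reading the three-way choice above off a tag's constraints, as an illustration
with invented numbers (4 KB PAGE_SIZE, no lowaddr/highaddr exclusion zone in
any of the cases):

    /*
     *  maxsize  alignment  nsegments  boundary     allocator chosen
     *  2048     256        1          0        ->  uma_zalloc (bufzone cache)
     *  1 MB     4096       256        0        ->  kmem_alloc_attr (any pages)
     *  1 MB     4096       256        64 KB    ->  kmem_alloc_contig (contig)
     */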
        if (*vaddr == NULL) {
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                    __func__, dmat, dmat->flags, ENOMEM);
                free(*mapp, M_DEVBUF);
                *mapp = NULL;
                return (ENOMEM);
        } else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) {
                printf("bus_dmamem_alloc failed to align memory properly.\n");

--- 7 unchanged lines hidden ---

 /*
  * Free a piece of memory and its associated dmamap that were allocated
  * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
  */
 void
 bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
 {
-       int len;
+       struct busdma_bufzone *bufzone;
+       busdma_bufalloc_t ba;

-#ifdef mftnotyet
-       pmap_change_attr((vm_offset_t)vaddr, dmat->maxsize, ARM_WRITE_BACK);
-#endif
-       len = max(dmat->maxsize, arm_dcache_align);
-       if (len <= PAGE_SIZE &&
-           (dmat->alignment < len) &&
-           !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
-               free(vaddr, M_DEVBUF);
-       else {
-               contigfree(vaddr, len, M_DEVBUF);
-       }
+       if (map->flags & DMAMAP_COHERENT)
+               ba = coherent_allocator;
+       else
+               ba = standard_allocator;
+
+       /* Be careful not to access map from here on. */
+
+       bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
+
+       if (bufzone != NULL && dmat->alignment <= bufzone->size &&
+           !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
+               uma_zfree(bufzone->umazone, vaddr);
+       else
+               kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize);
+
        dmat->map_count--;
        free(map, M_DEVBUF);
        CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
 }

 static int
 _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
     void *buf, bus_size_t buflen, int flags)

--- 518 unchanged lines hidden ---

                        bcopy((void *)bpage->vaddr,
                            (void *)bpage->datavaddr,
                            bpage->datacount);
                        bpage = STAILQ_NEXT(bpage, links);
                }
                dmat->bounce_zone->total_bounced++;
        }
        }
+       if (map->flags & DMAMAP_COHERENT)
+               return;

        sl = STAILQ_FIRST(&map->slist);
        while (sl) {
                listcount++;
                sl = STAILQ_NEXT(sl, slinks);
        }
        if ((sl = STAILQ_FIRST(&map->slist)) != NULL) {
                /* ARM caches are not self-snooping for dma */

--- 367 unchanged lines hidden ---
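
The DMAMAP_COHERENT early return added above is what makes the uncacheable
buffers pay off: bounce copying still happens, but all of the cache maintenance
below it is skipped. Driver code follows the same sync protocol either way; a
minimal sketch of that protocol, not code from this change:

    bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);     /* before device DMA */
    /* ... start the transfer, wait for it to complete ... */
    bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);    /* before CPU reads */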