busdma_machdep-v6.c, r269136 vs r269206 (at each change, the deleted r269136 lines appear first, followed by the added r269206 lines)
1/*-
2 * Copyright (c) 2012 Ian Lepore
3 * Copyright (c) 2010 Mark Tinguely
4 * Copyright (c) 2004 Olivier Houchard
5 * Copyright (c) 2002 Peter Grehan
6 * Copyright (c) 1997, 1998 Justin T. Gibbs.
7 * All rights reserved.
8 *

--- 17 unchanged lines hidden ---

26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 269136 2014-07-26 18:19:43Z ian $");
34__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 269206 2014-07-29 02:31:29Z ian $");
35
36#define _ARM32_BUS_DMA_PRIVATE
37#include <sys/param.h>
38#include <sys/kdb.h>
39#include <ddb/ddb.h>
40#include <ddb/db_output.h>
41#include <sys/systm.h>
42#include <sys/malloc.h>

--- 193 unchanged lines hidden ---

236 * a request, and we can use any memory allocator (as opposed to needing
237 * kmem_alloc_contig() just because it can allocate pages in an address range).
238 *
239 * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the
240 * same value on 32-bit architectures) as their lowaddr constraint, and we can't
241 * possibly have RAM at an address higher than the highest address we can
242 * express, so we take a fast out.
243 */
244static __inline int
245_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
244static int
245exclusion_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
246{
247 int i;
248
249 if (lowaddr >= BUS_SPACE_MAXADDR)
250 return (0);
251
252 for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
253 if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
254 || (lowaddr < phys_avail[i] &&
255 highaddr > phys_avail[i]))
256 return (1);
257 }
258 return (0);
259}
260
261/*
262 * Return true if the given address does not fall on the alignment boundary.
263 */
264static __inline int
265alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr)
266{
267
268 return (addr & (dmat->alignment - 1));
269}
270
271/*
272 * Return true if the buffer start or end does not fall on a cacheline boundary.
273 */
274static __inline int
275cacheline_bounce(bus_addr_t addr, bus_size_t size)
276{
277
278 return ((addr | size) & arm_dcache_align_mask);
279}
280
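
The three bounce predicates above (exclusion_bounce(), alignment_bounce() and cacheline_bounce()) can be read in isolation. The stand-alone sketch below reproduces the same checks with a mock phys_avail[] table, an assumed 64-byte cache line and a hypothetical 4 KiB tag alignment; every name and value in it is illustrative only and is not part of the kernel source shown here.

/*
 * Stand-alone illustration of the bounce checks (not kernel code).
 * phys_avail[], DCACHE_ALIGN_MASK and TAG_ALIGNMENT are mock values.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t bus_addr_t;

/* Mock exclusion table: {start, end} pairs of usable RAM, zero-terminated. */
static bus_addr_t phys_avail[] = { 0x00100000, 0x20000000, 0, 0 };

#define DCACHE_ALIGN_MASK  63u          /* assumed 64-byte cache lines    */
#define TAG_ALIGNMENT      4096u        /* hypothetical tag alignment     */
#define MAXADDR            0xFFFFFFFFu  /* stand-in for BUS_SPACE_MAXADDR */

/* True if the tag's exclusion zone overlaps any usable RAM range. */
static int
exclusion_bounce(bus_addr_t lowaddr, bus_addr_t highaddr)
{
	int i;

	if (lowaddr >= MAXADDR)
		return (0);
	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1]) ||
		    (lowaddr < phys_avail[i] && highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

/* True if the address is not aligned to the tag's alignment. */
static int
alignment_bounce(bus_addr_t addr)
{

	return (addr & (TAG_ALIGNMENT - 1));
}

/* True if the buffer start or end is not cache-line aligned. */
static int
cacheline_bounce(bus_addr_t addr, uint32_t size)
{

	return ((addr | size) & DCACHE_ALIGN_MASK);
}

int
main(void)
{

	/* 1: lowaddr falls inside a usable RAM range, so bouncing may be needed. */
	printf("%d\n", exclusion_bounce(0x0FFFFFFF, 0xFFFFFFFF) != 0);
	/* 1: 0x1234 is not 4 KiB aligned. */
	printf("%d\n", alignment_bounce(0x1234) != 0);
	/* 1: a 100-byte buffer does not end on a 64-byte cache line. */
	printf("%d\n", cacheline_bounce(0x1000, 100) != 0);
	return (0);
}

In the revised run_filter() these predicates are combined with the tag's optional filter callback to decide whether a given physical page has to be bounced.
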
281static __inline struct arm32_dma_range *
282_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
283 bus_addr_t curaddr)
284{
285 struct arm32_dma_range *dr;
286 int i;
287
288 for (i = 0, dr = ranges; i < nranges; i++, dr++) {

--- 17 unchanged lines hidden ---

306run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t size, int coherent)
307{
308 int retval;
309
310 retval = 0;
311
312 do {
313 if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
294 || ((paddr & (dmat->alignment - 1)) != 0) ||
295 (!coherent && (size & arm_dcache_align_mask)) ||
296 (!coherent && (paddr & arm_dcache_align_mask)))
314 || alignment_bounce(dmat, paddr) ||
315 (!coherent && cacheline_bounce(paddr, size)))
316 && (dmat->filter == NULL
317 || (*dmat->filter)(dmat->filterarg, paddr) != 0))
318 retval = 1;
319
320 dmat = dmat->parent;
321 } while (retval == 0 && dmat != NULL);
322 return (retval);
323}

--- 128 unchanged lines hidden ---

452 newtag->filter = parent->filter;
453 newtag->filterarg = parent->filterarg;
454 newtag->parent = parent->parent;
455 }
456 if (newtag->parent != NULL)
457 atomic_add_int(&parent->ref_count, 1);
458 }
459
441 if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
442 || newtag->alignment > 1)
460 if (exclusion_bounce(newtag->lowaddr, newtag->highaddr)
461 || alignment_bounce(newtag, 1))
462 newtag->flags |= BUS_DMA_COULD_BOUNCE;
463
464 /*
465 * Any request can auto-bounce due to cacheline alignment, in addition
466 * to any alignment or boundary specifications in the tag, so if the
467 * ALLOCNOW flag is set, there's always work to do.
468 */
469 if ((flags & BUS_DMA_ALLOCNOW) != 0) {

--- 262 unchanged lines hidden ---

732 * else allocate non-contiguous pages if...
733 * - The page count that could get allocated doesn't exceed nsegments.
734 * - The alignment constraint isn't larger than a page boundary.
735 * - There are no boundary-crossing constraints.
736 * else allocate a block of contiguous pages because one or more of the
737 * constraints is something that only the contig allocator can fulfill.
738 */
739 if (bufzone != NULL && dmat->alignment <= bufzone->size &&
721 !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
740 !exclusion_bounce(dmat->lowaddr, dmat->highaddr)) {
741 *vaddr = uma_zalloc(bufzone->umazone, mflags);
742 } else if (dmat->nsegments >= btoc(dmat->maxsize) &&
743 dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
744 *vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
745 mflags, 0, dmat->lowaddr, memattr);
746 } else {
747 *vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
748 mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,

--- 30 unchanged lines hidden ---

779 else
780 ba = standard_allocator;
781
782 /* Be careful not to access map from here on. */
783
784 bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
785
786 if (bufzone != NULL && dmat->alignment <= bufzone->size &&
768 !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
787 !exclusion_bounce(dmat->lowaddr, dmat->highaddr))
788 uma_zfree(bufzone->umazone, vaddr);
789 else
790 kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
791
792 dmat->map_count--;
793 free(map, M_DEVBUF);
794 CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
795}

--- 806 unchanged lines hidden ---
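
The allocation-strategy comment shown above (the "else allocate non-contiguous pages if..." block) can also be sketched on its own. In the sketch below the mock_tag fields, the btoc() macro and the always-false exclusion_bounce() stub are assumptions made purely for illustration; the real code consults the bus_dma tag and the uma bufzone set up elsewhere in this file.

/*
 * Stand-alone sketch of the three-way allocator choice (not kernel code).
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE  4096u
#define MAXADDR    0xFFFFFFFFu
#define btoc(x)    (((x) + PAGE_SIZE - 1) / PAGE_SIZE)  /* bytes to pages */

struct mock_tag {
	uint32_t maxsize;       /* largest mapping the tag must support       */
	uint32_t alignment;     /* required buffer alignment                  */
	uint32_t boundary;      /* allocations may not cross this boundary    */
	uint32_t lowaddr;       /* exclusion-zone lower bound                 */
	uint32_t highaddr;      /* exclusion-zone upper bound                 */
	int      nsegments;     /* max scatter/gather segments accepted       */
	uint32_t bufzone_size;  /* size of the matching uma bufzone, 0 = none */
};

/* Stub: assume no exclusion zone overlaps usable RAM. */
static int
exclusion_bounce(uint32_t lowaddr, uint32_t highaddr)
{

	(void)lowaddr;
	(void)highaddr;
	return (0);
}

static const char *
pick_allocator(const struct mock_tag *t)
{

	/* Small, weakly constrained request: any uma bufzone chunk will do. */
	if (t->bufzone_size != 0 && t->alignment <= t->bufzone_size &&
	    !exclusion_bounce(t->lowaddr, t->highaddr))
		return ("uma_zalloc");
	/*
	 * Enough segments to cover every page, page-or-smaller alignment and
	 * no boundary: the pages need not be physically contiguous.
	 */
	if (t->nsegments >= (int)btoc(t->maxsize) &&
	    t->alignment <= PAGE_SIZE && t->boundary == 0)
		return ("kmem_alloc_attr");
	/* Only the contiguous allocator can satisfy anything stricter. */
	return ("kmem_alloc_contig");
}

int
main(void)
{
	struct mock_tag small  = { 512, 4, 0, MAXADDR, MAXADDR, 1, 1024 };
	struct mock_tag paged  = { 65536, 4096, 0, MAXADDR, MAXADDR, 16, 0 };
	struct mock_tag strict = { 65536, 65536, 0x10000, MAXADDR, MAXADDR, 1, 0 };

	printf("%s\n", pick_allocator(&small));   /* uma_zalloc        */
	printf("%s\n", pick_allocator(&paged));   /* kmem_alloc_attr   */
	printf("%s\n", pick_allocator(&strict));  /* kmem_alloc_contig */
	return (0);
}

The order of the tests mirrors the comment: the uma bufzone path is tried first, and kmem_alloc_contig() is the fallback when nothing weaker can satisfy the constraints.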